Compare commits
	
		
			921 Commits
		
	
	
		
			2013.12.23
			...
			2014.03.21
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
|   | 1f91bd15c3 | ||
|   | 11a15be4ce | ||
|   | 14e17e18cb | ||
|   | 1b124d1942 | ||
|   | 747373d4ae | ||
|   | 18d367c0a5 | ||
|   | a1a530b067 | ||
|   | cb9722cb3f | ||
|   | 773c0b4bb8 | ||
|   | 23c322a531 | ||
|   | 7e8c0af004 | ||
|   | d2983ccb25 | ||
|   | f24e9833dc | ||
|   | bc2bdf5709 | ||
|   | 627a209f74 | ||
|   | 1a4895453a | ||
|   | aab74fa106 | ||
|   | 2bd9efd4c2 | ||
|   | 39a743fb9b | ||
|   | 4966a0b22d | ||
|   | fc26023120 | ||
|   | 8d7c0cca13 | ||
|   | f66ede4328 | ||
|   | cc88b90ec8 | ||
|   | b6c5fa9a0b | ||
|   | dff10eaa77 | ||
|   | 4e6f9aeca1 | ||
|   | e68301af21 | ||
|   | 17286a96f2 | ||
|   | 0892363e6d | ||
|   | f102372b5f | ||
|   | ecbe1ad207 | ||
|   | 9d840c43b5 | ||
|   | 6f50f63382 | ||
|   | ff14fc4964 | ||
|   | e125c21531 | ||
|   | 93d020dd65 | ||
|   | a7515ec265 | ||
|   | b6c1ceccc2 | ||
|   | 4056ad8f36 | ||
|   | 6563837ee1 | ||
|   | fd5e6f7ef2 | ||
|   | 15fd51b37c | ||
|   | f1cef7a9ff | ||
|   | 8264223511 | ||
|   | bc6d597828 | ||
|   | aba77bbfc2 | ||
|   | 955c451456 | ||
|   | e5de3f6c89 | ||
|   | 2a1db721d4 | ||
|   | 1e0eb60f1a | ||
|   | 87a29e6f25 | ||
|   | c3d36f134f | ||
|   | 84769e708c | ||
|   | 9d2ecdbc71 | ||
|   | 9b69af5342 | ||
|   | c21215b421 | ||
|   | cddcfd90b4 | ||
|   | f36aacba0f | ||
|   | 355271fb61 | ||
|   | 2a5b502364 | ||
|   | 98ff9d82d4 | ||
|   | b1ff87224c | ||
|   | b461641fb9 | ||
|   | b047de6f6e | ||
|   | 34ca5d9ba0 | ||
|   | 60cc4dc4b4 | ||
|   | db95dc13a1 | ||
|   | 777ac90791 | ||
|   | 04f9bebbcb | ||
|   | 4ea3137e41 | ||
|   | a0792b738e | ||
|   | 19a41fc613 | ||
|   | 3ee52157fb | ||
|   | c4d197ee2d | ||
|   | a33932cfe3 | ||
|   | bcf89ce62c | ||
|   | e3899d0e00 | ||
|   | dcb00da49c | ||
|   | aa51d20d19 | ||
|   | ae7ed92057 | ||
|   | e45b31d9bd | ||
|   | 5a25f39653 | ||
|   | 963d7ec412 | ||
|   | e712d94adf | ||
|   | 6a72423955 | ||
|   | 4126826b10 | ||
|   | b773ead7fd | ||
|   | 855e2750bc | ||
|   | 805ef3c60b | ||
|   | fbc2dcb40b | ||
|   | 5375d7ad84 | ||
|   | 90f3476180 | ||
|   | ee95c09333 | ||
|   | 75d06db9fc | ||
|   | 439a1fffcb | ||
|   | 9d9d70c462 | ||
|   | b4a186b7be | ||
|   | bdebf51c8f | ||
|   | 264b86f9b4 | ||
|   | 9e55e37a2e | ||
|   | 1471956573 | ||
|   | 27865b2169 | ||
|   | 6d07ce0162 | ||
|   | edb7fc5435 | ||
|   | 31f77343f2 | ||
|   | 63ad031583 | ||
|   | 957688cee6 | ||
|   | 806d6c2e8c | ||
|   | 0ef68e04d9 | ||
|   | a496524db2 | ||
|   | 935c7360cc | ||
|   | 340b046876 | ||
|   | cc1db7f9b7 | ||
|   | a4ff6c4762 | ||
|   | 1060425cbb | ||
|   | e9c092f125 | ||
|   | 22ff5d2105 | ||
|   | 136db7881b | ||
|   | dae313e725 | ||
|   | b74fa8cd2c | ||
|   | 94eae04c94 | ||
|   | 16ff7ebc77 | ||
|   | c361c505b0 | ||
|   | d37c07c575 | ||
|   | 9d6105c9f0 | ||
|   | 8dec03ecba | ||
|   | 826547870b | ||
|   | 52d6a9a61d | ||
|   | ad242b5fbc | ||
|   | 3524175625 | ||
|   | 7b9965ea93 | ||
|   | 0a5bce566f | ||
|   | 8012bd2424 | ||
|   | f55a1f0a88 | ||
|   | bacac173a9 | ||
|   | ca1fee34f2 | ||
|   | 6dadaa9930 | ||
|   | 553f6e4633 | ||
|   | 652bee05f0 | ||
|   | d63516e9cd | ||
|   | e477dcf649 | ||
|   | 9d3f7781f3 | ||
|   | c7095dada3 | ||
|   | 607dbbad76 | ||
|   | 17b75c0de1 | ||
|   | ab24f4f3be | ||
|   | e1a52d9e10 | ||
|   | d0ff838433 | ||
|   | b37b94501c | ||
|   | cb3bb2cfef | ||
|   | e2cc7983e9 | ||
|   | c9ae7b9565 | ||
|   | 86fb4347f7 | ||
|   | 2fcec131f5 | ||
|   | 9f62eaf4ef | ||
|   | f92259c026 | ||
|   | 0afef30b23 | ||
|   | dcdfd1c711 | ||
|   | 2acc1f8f50 | ||
|   | 2c39b0c695 | ||
|   | e77c5b4f63 | ||
|   | 409a16cb72 | ||
|   | 94d5e90b4f | ||
|   | 2d73b45805 | ||
|   | 271a2dbfa2 | ||
|   | bf4adcac66 | ||
|   | fb8b8fdd62 | ||
|   | 5a0b26252e | ||
|   | 7d78f0cc48 | ||
|   | f00fc78674 | ||
|   | 392017874c | ||
|   | c3cb92d1ab | ||
|   | aa5590fa07 | ||
|   | 8cfb5bbf92 | ||
|   | 69bb54ebf9 | ||
|   | ca97a56e4b | ||
|   | fc26f3b4c2 | ||
|   | f604c93c64 | ||
|   | dc3727b65c | ||
|   | aba3231de1 | ||
|   | 9193bab91d | ||
|   | fbcf3e416d | ||
|   | c0e5d85631 | ||
|   | ca7fa3dcb3 | ||
|   | 4ccfba28d9 | ||
|   | abb82f1ddc | ||
|   | cda008cff1 | ||
|   | 1877a14049 | ||
|   | 546582ec3e | ||
|   | 4534485586 | ||
|   | a9ab8855e4 | ||
|   | 8a44ef6868 | ||
|   | 0c7214c404 | ||
|   | 4cf9654693 | ||
|   | 50a138d95c | ||
|   | 1b86cc41cf | ||
|   | 91346358b0 | ||
|   | f3783d4b77 | ||
|   | 89ef304bed | ||
|   | 83cebb8b7a | ||
|   | 9e68f9fdf1 | ||
|   | 2acea5c03d | ||
|   | 978177527e | ||
|   | 2648c436f3 | ||
|   | 33f1f2c455 | ||
|   | 995befe0e9 | ||
|   | 1bb92aff55 | ||
|   | b8e1471d3a | ||
|   | 60daf7f0bb | ||
|   | a83a3139d1 | ||
|   | fdb7ca3b8d | ||
|   | 0d7caf5cdf | ||
|   | a339d7ba91 | ||
|   | 7216de55d6 | ||
|   | 2437fbca64 | ||
|   | 7d75d06b78 | ||
|   | 13ef5648c4 | ||
|   | 5b2478e2ba | ||
|   | 8b286571c3 | ||
|   | f3ac523794 | ||
|   | 020cf5ebfd | ||
|   | 54ab193970 | ||
|   | 8f563f32ab | ||
|   | 151bae3566 | ||
|   | 76df418cba | ||
|   | d0a72674c6 | ||
|   | 1d430674c7 | ||
|   | 70cb73922b | ||
|   | 344400951c | ||
|   | ea5a0be811 | ||
|   | 3c7fd0bdb2 | ||
|   | 6cadf8c858 | ||
|   | 27579b9e4c | ||
|   | 4d756a9cc0 | ||
|   | 3e668e05be | ||
|   | 60d3a2e0f8 | ||
|   | cc3a3b6b47 | ||
|   | eda1d49a62 | ||
|   | 62e609ab77 | ||
|   | 2bfe4ead4b | ||
|   | b1c6c32f78 | ||
|   | f6acbdecf4 | ||
|   | f1c9dfcc01 | ||
|   | ce78943ae1 | ||
|   | d6f0d86649 | ||
|   | 5bb67dbfea | ||
|   | 47610c4d3e | ||
|   | b732f3581f | ||
|   | 9e57ce716f | ||
|   | cd7ee7aa44 | ||
|   | 3cfe791473 | ||
|   | 973f2532f5 | ||
|   | bc3be21d59 | ||
|   | 0bf5cf9886 | ||
|   | 919052d094 | ||
|   | a2dafe2887 | ||
|   | 92661c994b | ||
|   | ffe8fe356a | ||
|   | bc2f773b4f | ||
|   | f919201ecc | ||
|   | 7ff5d5c2e2 | ||
|   | 9b77f951c7 | ||
|   | a25f2f990a | ||
|   | 78b373975d | ||
|   | 2fcc873c4c | ||
|   | 23c2baadb3 | ||
|   | 521ee82334 | ||
|   | 1df96e59ce | ||
|   | 3e123c1e28 | ||
|   | f38da66731 | ||
|   | 06aabfc422 | ||
|   | 1052d2bfec | ||
|   | 5e0b652344 | ||
|   | 0f8f097183 | ||
|   | 491ed3dda2 | ||
|   | af284c6d1b | ||
|   | 41d3ec5fba | ||
|   | 0568c352f3 | ||
|   | 2e7b4cb714 | ||
|   | 9767726b66 | ||
|   | 9ddfd84e41 | ||
|   | 1cf563d84b | ||
|   | 7928024f57 | ||
|   | 3eb38acb43 | ||
|   | f7300c5c90 | ||
|   | 3489b7d26c | ||
|   | acd2bcc384 | ||
|   | 43e77ca455 | ||
|   | da36297988 | ||
|   | dbb94fb044 | ||
|   | d68f0cdb23 | ||
|   | eae16eb67b | ||
|   | 4fc946b546 | ||
|   | 280bc5dad6 | ||
|   | f43770d8c9 | ||
|   | 98c4b8fa1b | ||
|   | ccb079ee67 | ||
|   | 2ea237472c | ||
|   | 0d4b4865cc | ||
|   | fe52f9f956 | ||
|   | 882907a818 | ||
|   | 572a89cc4e | ||
|   | c377110539 | ||
|   | a9c7198a0b | ||
|   | f6f01ea17b | ||
|   | f2d0fc6823 | ||
|   | f7000f3a1b | ||
|   | c7f0177fa7 | ||
|   | 09c4d50944 | ||
|   | 2eb5d315d4 | ||
|   | ad5976b4d9 | ||
|   | a0dfcdce5e | ||
|   | 96d1637082 | ||
|   | 960f317171 | ||
|   | 4412ca751d | ||
|   | cbffec0c95 | ||
|   | 0cea52cc18 | ||
|   | 6d784e87f4 | ||
|   | ae6cae78f1 | ||
|   | 0f99566c01 | ||
|   | 2db806b4aa | ||
|   | 3f32c0ba4c | ||
|   | 541cb26c0d | ||
|   | 5544e038ab | ||
|   | 9032dc28a6 | ||
|   | 03635e2a71 | ||
|   | 00cf938aa5 | ||
|   | a5f707c495 | ||
|   | 1824b48169 | ||
|   | 07ad22b8af | ||
|   | b53466e168 | ||
|   | 6a7a389679 | ||
|   | 4edff78531 | ||
|   | 99043c2ea5 | ||
|   | e68abba910 | ||
|   | 3165dc4d9f | ||
|   | 66c43a53e4 | ||
|   | 463b334616 | ||
|   | b71dbc57c4 | ||
|   | 72ca1d7f45 | ||
|   | 76e461f395 | ||
|   | 1074982e6e | ||
|   | 29b2aaf035 | ||
|   | 6f90d098c5 | ||
|   | 0715161450 | ||
|   | 896583517f | ||
|   | 713d31fac8 | ||
|   | 96cb10a5f5 | ||
|   | c207c1044e | ||
|   | 79629ec717 | ||
|   | 008fda0f08 | ||
|   | 0ae6b01937 | ||
|   | def630e523 | ||
|   | c5ba203e23 | ||
|   | 2317e6b2b3 | ||
|   | cb38928974 | ||
|   | fa78f13302 | ||
|   | 18395217c4 | ||
|   | 34bd987811 | ||
|   | af6ba6a1c4 | ||
|   | 85409a0c69 | ||
|   | ebfe352b62 | ||
|   | fde56d2f17 | ||
|   | 3501423dfe | ||
|   | 0de668af51 | ||
|   | 2a584ea90a | ||
|   | 0f6ed94a15 | ||
|   | bcb891e82b | ||
|   | ac6e4ca1ed | ||
|   | 2e20bba708 | ||
|   | e70dc1d14b | ||
|   | 0793a7b3c7 | ||
|   | 026fcc0495 | ||
|   | 81c2f20b53 | ||
|   | 1afe753462 | ||
|   | 524c2c716a | ||
|   | b542d4bbd7 | ||
|   | cf1eb45153 | ||
|   | a97bcd80ba | ||
|   | 17968e444c | ||
|   | 2e3fd9ec2f | ||
|   | d6a283b025 | ||
|   | 9766538124 | ||
|   | 98dbee8681 | ||
|   | e421491b3b | ||
|   | 6828d37c41 | ||
|   | bf5f610099 | ||
|   | 8b7f73404a | ||
|   | 85cacb2f51 | ||
|   | b3fa3917e2 | ||
|   | 082c6c867a | ||
|   | 03fcf1ab57 | ||
|   | 3b00dea5eb | ||
|   | 8bc6c8e3c0 | ||
|   | 79bc27b53a | ||
|   | 84dd703199 | ||
|   | c6fdba23a6 | ||
|   | b19fe521a9 | ||
|   | c1e672d121 | ||
|   | f4371f4784 | ||
|   | d914d9d187 | ||
|   | 845d14d377 | ||
|   | 4a9540b6d2 | ||
|   | 9f31be7000 | ||
|   | 41fa1b627d | ||
|   | c0c4e66b29 | ||
|   | cd8662de22 | ||
|   | 3587159614 | ||
|   | d67cc9fa7c | ||
|   | bf3a2fe923 | ||
|   | e9ea0bf123 | ||
|   | 63424b6233 | ||
|   | 0bf35c5cf5 | ||
|   | 95c29381eb | ||
|   | 94c4abce7f | ||
|   | f2dffe55f8 | ||
|   | 46a073bfac | ||
|   | df872ec4e7 | ||
|   | 5de90176d9 | ||
|   | dcf3eec47a | ||
|   | e9e4f30d26 | ||
|   | 83cebd73d4 | ||
|   | 1df4229bd7 | ||
|   | 3c995527e9 | ||
|   | 7c62b568a2 | ||
|   | ccf9114e84 | ||
|   | d8061908bb | ||
|   | 211e17dd43 | ||
|   | 6cb38a9994 | ||
|   | fa7df757a7 | ||
|   | 8c82077619 | ||
|   | e5d1f9e50a | ||
|   | 7ee50ae7b5 | ||
|   | de563c9da0 | ||
|   | 50451f2a18 | ||
|   | 9bc70948e1 | ||
|   | 5dc733f071 | ||
|   | bc4850908c | ||
|   | 20650c8654 | ||
|   | 56dced2670 | ||
|   | eef726c04b | ||
|   | acf1555d76 | ||
|   | 22e7f1a6ec | ||
|   | 3c49325658 | ||
|   | bb1cd2bea1 | ||
|   | fdf1f8d4ce | ||
|   | 117c8c6b97 | ||
|   | 5cef4ff09b | ||
|   | 91264ce572 | ||
|   | c79ef8e1ae | ||
|   | 58d915df51 | ||
|   | 7881a64499 | ||
|   | 90159f5561 | ||
|   | 99877772d0 | ||
|   | b0268cb6ce | ||
|   | 4edff4cfa8 | ||
|   | 1eac553e7e | ||
|   | 9d3ac7444d | ||
|   | 588128d054 | ||
|   | 8e93b9b9aa | ||
|   | b4bcffefa3 | ||
|   | 2b39af9b4f | ||
|   | 23fe495feb | ||
|   | b5dbe89bba | ||
|   | dbe80ca7ad | ||
|   | 009a3408f5 | ||
|   | b58e3c8918 | ||
|   | 56b6faf91e | ||
|   | 7ac1f877a7 | ||
|   | d55433bbfd | ||
|   | f0ce2bc1c5 | ||
|   | c3bc00b90e | ||
|   | ff6b7b049b | ||
|   | f46359121f | ||
|   | 37c1525c17 | ||
|   | c85e4cf7b4 | ||
|   | c66dcda287 | ||
|   | 6d845922ab | ||
|   | 2949cbe036 | ||
|   | c3309a7774 | ||
|   | 7aed837595 | ||
|   | 0eb799bae9 | ||
|   | 4baff4a4ae | ||
|   | 45d7bc2f8b | ||
|   | c0c2ddddcd | ||
|   | a96ed91610 | ||
|   | c1206423c4 | ||
|   | 659aa21ba1 | ||
|   | efd02e858a | ||
|   | 3bf8bc7f37 | ||
|   | 8ccda826d5 | ||
|   | b9381e43c2 | ||
|   | fcdea2666d | ||
|   | c4db377cbb | ||
|   | 90dc5e8693 | ||
|   | c81a855b0f | ||
|   | c8d8ec8567 | ||
|   | 4f879a5be0 | ||
|   | 1a0648b4a9 | ||
|   | 3c1b4669d0 | ||
|   | 24b3d5e538 | ||
|   | ab083b08ab | ||
|   | 89acb96927 | ||
|   | 79752e18b1 | ||
|   | 55b41c723c | ||
|   | 9f8928d032 | ||
|   | 3effa7ceaa | ||
|   | ed9cc2f1e0 | ||
|   | 975fa541c2 | ||
|   | 251974e44c | ||
|   | 38a40276ec | ||
|   | 57b6288358 | ||
|   | c3f51436bf | ||
|   | 0c708f11cb | ||
|   | fb2a706d11 | ||
|   | 0b76600deb | ||
|   | 245b612a36 | ||
|   | d882161d5a | ||
|   | d4a21e0b49 | ||
|   | 26a78d4bbf | ||
|   | 8db69786c2 | ||
|   | b11cec4162 | ||
|   | 7eeb5bef24 | ||
|   | 9d2032932c | ||
|   | 6490306017 | ||
|   | ceb2b7d257 | ||
|   | 459a53c2c2 | ||
|   | adc267eebf | ||
|   | ffe8f62d27 | ||
|   | ed85007039 | ||
|   | 5aaca50d60 | ||
|   | 869baf3565 | ||
|   | e299f6d27f | ||
|   | 4a192f817e | ||
|   | bc1d1a5a71 | ||
|   | 456895d9cf | ||
|   | 218c15ab59 | ||
|   | 17ab4d3b5e | ||
|   | 31ef0ff038 | ||
|   | 37e3b90d59 | ||
|   | 00ff8f92a5 | ||
|   | 4857beba3a | ||
|   | c1e60cc2bf | ||
|   | 98669ed79c | ||
|   | a3978a6159 | ||
|   | e3a9f32f52 | ||
|   | 87fac3238d | ||
|   | a2fb2a2134 | ||
|   | 9e8ee54553 | ||
|   | 117bec936c | ||
|   | 1547c8cc88 | ||
|   | 075911d48e | ||
|   | b21a918984 | ||
|   | f9b8549609 | ||
|   | d1b30713fb | ||
|   | e2ba07024f | ||
|   | 9b05bd42e5 | ||
|   | b6d3a99678 | ||
|   | 96d7b8873a | ||
|   | efc867775e | ||
|   | 5ab772f09c | ||
|   | 2a89386232 | ||
|   | 4d9be98dbc | ||
|   | 6737907826 | ||
|   | c060b77446 | ||
|   | 7e8caf30c0 | ||
|   | ca3e054750 | ||
|   | 1da1558f46 | ||
|   | 25c67d257c | ||
|   | a17d16d59c | ||
|   | d16076ff3e | ||
|   | 6c57e8a063 | ||
|   | db1f388878 | ||
|   | 0f2999fe2b | ||
|   | 53bfd6b24c | ||
|   | 5700e7792a | ||
|   | 38c2e5b8d5 | ||
|   | 48f9678a32 | ||
|   | beddbc2ad1 | ||
|   | f89197d73e | ||
|   | 944d65c762 | ||
|   | f945612bd0 | ||
|   | 59188de113 | ||
|   | 352d08e3e5 | ||
|   | bacb5e4f44 | ||
|   | 008af8660b | ||
|   | 886fa72324 | ||
|   | 2c5bae429a | ||
|   | f265fc1238 | ||
|   | 1394ce65b4 | ||
|   | 67ccb77197 | ||
|   | 63ef36e8d8 | ||
|   | 0b65e5d40f | ||
|   | 629be17af4 | ||
|   | fd28827864 | ||
|   | 8c61d9a9b1 | ||
|   | 975d35dbab | ||
|   | 8b769664c4 | ||
|   | 76f270a46a | ||
|   | 9dab1b7f28 | ||
|   | d3e5bbf437 | ||
|   | 18a25c5d78 | ||
|   | 924f47f7b6 | ||
|   | 22ff1c4a93 | ||
|   | 35409e1101 | ||
|   | 65d781128a | ||
|   | c35b1b07e2 | ||
|   | 066f6a0630 | ||
|   | 12ed57418c | ||
|   | 8b1be5cd73 | ||
|   | 780083dbc6 | ||
|   | 4919603f66 | ||
|   | dd26ced164 | ||
|   | bd2d82a5d3 | ||
|   | c4cd138b92 | ||
|   | 65697b3bf3 | ||
|   | 50317b111d | ||
|   | d7975ea287 | ||
|   | 714d709a31 | ||
|   | 11577ec054 | ||
|   | 79bf58f9b5 | ||
|   | cd8a562267 | ||
|   | de3ef3ed58 | ||
|   | 8908741806 | ||
|   | ba7678f9cc | ||
|   | a70c83768e | ||
|   | 04b4d394d9 | ||
|   | 130f12985a | ||
|   | 4ca5d43cd8 | ||
|   | 4bbf139aa7 | ||
|   | 47739636a9 | ||
|   | 407ae733ab | ||
|   | c39f7013e1 | ||
|   | a4a028323e | ||
|   | 780ee4e501 | ||
|   | d7b51547c0 | ||
|   | 43030f36db | ||
|   | 48c63f1653 | ||
|   | 90f479b6d5 | ||
|   | 6fd2957163 | ||
|   | d3a1c71917 | ||
|   | af1588c05f | ||
|   | 2250865fb0 | ||
|   | 99f770caa8 | ||
|   | 00122de6a9 | ||
|   | a70515c0fd | ||
|   | 398edd0689 | ||
|   | 6562df768d | ||
|   | 06769acd71 | ||
|   | 32dac6943d | ||
|   | 90834c78fe | ||
|   | 47917f24c4 | ||
|   | d614aa40e3 | ||
|   | bc4ba05fcb | ||
|   | 8d9453b9e8 | ||
|   | e4f320a4d0 | ||
|   | ef9f2ba7af | ||
|   | 4a3b72771f | ||
|   | 913f32929b | ||
|   | 9834872bf6 | ||
|   | 94a23d2a1e | ||
|   | 608bf69880 | ||
|   | 032b3df5af | ||
|   | 9d11a41fe4 | ||
|   | 2989501131 | ||
|   | 7b0817e8e1 | ||
|   | 9d4288b2d4 | ||
|   | 3486df383b | ||
|   | b60016e831 | ||
|   | 5aafe895fc | ||
|   | b853d2e155 | ||
|   | b7ab059084 | ||
|   | c91778f8c0 | ||
|   | 5016f3eac8 | ||
|   | efb1bb90a0 | ||
|   | 4cf393bb4b | ||
|   | ce4e242a6f | ||
|   | b27bec212f | ||
|   | 704519c7e3 | ||
|   | 6b79f40c3d | ||
|   | dd27fd1739 | ||
|   | dfa50793d8 | ||
|   | 2a7c35dd46 | ||
|   | f2ffd10bb2 | ||
|   | 8da531359e | ||
|   | e2b944cf43 | ||
|   | 3ec05685f7 | ||
|   | e103fd46ca | ||
|   | 877bfd69d1 | ||
|   | e0ef49f205 | ||
|   | f68cd00fe3 | ||
|   | ca70d215cf | ||
|   | d0390a0c92 | ||
|   | dd2535c38a | ||
|   | b78d180170 | ||
|   | 26dca1661e | ||
|   | f853f8594d | ||
|   | 8307aa73fb | ||
|   | d0da491e1e | ||
|   | 6e249060cf | ||
|   | fbcd7b5f83 | ||
|   | 9ac0a67581 | ||
|   | befdc8f3b6 | ||
|   | bb198c95e2 | ||
|   | c1195541b7 | ||
|   | 26844eb57b | ||
|   | a7732b672e | ||
|   | 677b3ce82f | ||
|   | fabfe17d5e | ||
|   | 82696d5d5d | ||
|   | 9eea4fb835 | ||
|   | 484aaeb204 | ||
|   | 8e589a8a47 | ||
|   | 2f21eb2db6 | ||
|   | c11529618a | ||
|   | 58c3c7ae38 | ||
|   | c8650f7ecd | ||
|   | 14e7543a5a | ||
|   | bf6705f584 | ||
|   | a9f53ce7ea | ||
|   | a45ea17042 | ||
|   | 4950f30890 | ||
|   | 7df7f00385 | ||
|   | d2250ea7fd | ||
|   | 17093b83ca | ||
|   | 5d8683a5cd | ||
|   | cede88e5bb | ||
|   | aadc71642a | ||
|   | 67d28bff12 | ||
|   | 7ee40b5d1c | ||
|   | db22af36ec | ||
|   | f8b5ab8cfa | ||
|   | 298f16f954 | ||
|   | 3d97cbbdaf | ||
|   | ce6b9a2dba | ||
|   | c3197e3e5c | ||
|   | d420d8dd1b | ||
|   | 3fabeaa1f4 | ||
|   | 35aa7098cd | ||
|   | 9d6192a5b8 | ||
|   | 76b1bd672d | ||
|   | 469ec9416a | ||
|   | 70af3439e9 | ||
|   | bb3c20965e | ||
|   | 5f59ee7942 | ||
|   | 8f89e68781 | ||
|   | 10bff13a66 | ||
|   | 166ff8a3c7 | ||
|   | b4622a328b | ||
|   | cc253000e4 | ||
|   | 42e4fcf23a | ||
|   | 9c63128668 | ||
|   | 9933b57430 | ||
|   | 84c92dc00f | ||
|   | 42154ad5bc | ||
|   | 96f1b0741c | ||
|   | bac268e243 | ||
|   | 3798eadccd | ||
|   | 2537186d43 | ||
|   | 0eecc6a417 | ||
|   | 0dc13f4c4a | ||
|   | f577e0ce15 | ||
|   | bd1b906527 | ||
|   | ecfef3e5bf | ||
|   | 3d3538e422 | ||
|   | 0cdad20c75 | ||
|   | 50144133c5 | ||
|   | 089cb705e8 | ||
|   | 525e1076ad | ||
|   | 282962bd36 | ||
|   | c93c2ab1c3 | ||
|   | 7b09a4d847 | ||
|   | 73a25b30ea | ||
|   | ac260dd81e | ||
|   | 48a2034671 | ||
|   | a9ce0c631e | ||
|   | afc7bc33cb | ||
|   | 168da92b9a | ||
|   | d70ad093af | ||
|   | 2a2e2770cc | ||
|   | 42cc71e80b | ||
|   | 496c19234c | ||
|   | 4f81667d76 | ||
|   | 56327689a2 | ||
|   | ad84831537 | ||
|   | 5f263296ea | ||
|   | 89650ea3a6 | ||
|   | 79f8295303 | ||
|   | 400e58103d | ||
|   | fcee8ee784 | ||
|   | 9148eb002b | ||
|   | 559e370f44 | ||
|   | cdeb10b5cd | ||
|   | e6162a90e6 | ||
|   | 9a6422a81e | ||
|   | fcea44c6d5 | ||
|   | 5d73273f6f | ||
|   | c11a0611d9 | ||
|   | 796495886e | ||
|   | fa27f667c8 | ||
|   | fc9713a1d2 | ||
|   | 62bcfa8c57 | ||
|   | 7f9886379c | ||
|   | c6e4b225b1 | ||
|   | 1c0f31f9f7 | ||
|   | 41292a3827 | ||
|   | 20f1be02df | ||
|   | a339e5cfb5 | ||
|   | f46f4a995b | ||
|   | 4ddba33f78 | ||
|   | e3b7aa8428 | ||
|   | d981cef6b9 | ||
|   | 6fa81ee96e | ||
|   | a1a337ade9 | ||
|   | c774b3c696 | ||
|   | 3e34db3170 | ||
|   | 317d4edfa8 | ||
|   | 9b12003c35 | ||
|   | 4ea170b8a0 | ||
|   | 49f2bf76a8 | ||
|   | 01c62591d1 | ||
|   | 1e91866f77 | ||
|   | 9656ee5d1d | ||
|   | a5f1e12a02 | ||
|   | ca9e792253 | ||
|   | aff24732b9 | ||
|   | 455fa214b6 | ||
|   | a9c5e5ca6e | ||
|   | cefcb9fde3 | ||
|   | bca4e93076 | ||
|   | 67c20aebb7 | ||
|   | 448711e39f | ||
|   | 8bf48f237d | ||
|   | 7c0578dc86 | ||
|   | 55033ffb0a | ||
|   | b4a9bf701a | ||
|   | a015dce0e2 | ||
|   | 28ab2e48ae | ||
|   | 6febd1c1df | ||
|   | 6350728be2 | ||
|   | a7c26e7338 | ||
|   | c880557666 | ||
|   | 85689a531f | ||
|   | cc14dfb8ec | ||
|   | 91d7d0b333 | ||
|   | 9887c9b2d6 | ||
|   | d2fee313ec | ||
|   | fa7f58e433 | ||
|   | 71cd2a571e | ||
|   | 7c094bfe2f | ||
|   | 0f30658329 | ||
|   | 31c1cf5a9d | ||
|   | e63fc1bed4 | ||
|   | efa1739b74 | ||
|   | 5ffecde73f | ||
|   | 08d13955dd | ||
|   | 531147dd5e | ||
|   | a17c95f5e4 | ||
|   | eadaf08c16 | ||
|   | 4a9c9b6fdb | ||
|   | b969ab48d9 | ||
|   | 8fa8a6299b | ||
|   | b2b0870b3a | ||
|   | 4fb757d1e0 | ||
|   | 241bce7aaf | ||
|   | 33ec2ae8d9 | ||
|   | c801b2051a | ||
|   | 7976fcac55 | ||
|   | e9f9a10fba | ||
|   | 1cdfc31e1f | ||
|   | 19dab5e6cc | ||
|   | c0f9969b9e | ||
|   | a0ddb8a2fa | ||
|   | c1d1facd06 | ||
|   | b26559878f | ||
|   | fd46a318a2 | ||
|   | 5d4f3985be | ||
|   | 360babf799 | ||
|   | a1b92edbb3 | ||
|   | 12c978739a | ||
|   | 4bc60dafeb | ||
|   | bf5b0a1bfb | ||
|   | bfe9de8510 | ||
|   | 5ecd3c6a09 | ||
|   | 608d11f515 | ||
|   | c7f8537dd9 | ||
|   | 723f839911 | ||
|   | 61224dbcdd | ||
|   | c3afc93a69 | ||
|   | 7b8af56340 | ||
|   | 539179f45b | ||
|   | 7217e148fb | ||
|   | d29b5e812b | ||
|   | 1e923b0d29 | ||
|   | f7e9d77f34 | ||
|   | 41cc67c542 | ||
|   | c645c7658d | ||
|   | b874fe2da8 | ||
|   | c7deaa4c74 | ||
|   | e6812ac99d | ||
|   | 719d3927d7 | ||
|   | 55e663a8d7 | ||
|   | 2c62dc26c8 | ||
|   | 3d4a70b821 | ||
|   | 4bcc7bd1f2 | ||
|   | f49d89ee04 | ||
|   | dabc127362 | ||
|   | c25c991809 | ||
|   | f45f96f8f8 | ||
|   | 1538eff6d8 | ||
|   | 00b2685b9c | ||
|   | 8e3e03229e | ||
|   | 9d8d675e0e | ||
|   | 933605d7e8 | ||
|   | b3d9ef88ec | ||
|   | 8958b6916c | ||
|   | 9fc3bef87a | ||
|   | d80044c235 | ||
|   | 3bc2ddccc8 | ||
|   | 8ab470f1b2 | 
							
								
								
									
										2
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							| @@ -23,6 +23,8 @@ updates_key.pem | ||||
| *.vtt | ||||
| *.flv | ||||
| *.mp4 | ||||
| *.m4a | ||||
| *.m4v | ||||
| *.part | ||||
| test/testdata | ||||
| .tox | ||||
|   | ||||
							
								
								
									
										432
									
								
								README.md
									
									
									
									
									
								
							
							
						
						
									
										432
									
								
								README.md
									
									
									
									
									
								
							| @@ -14,187 +14,246 @@ your Unix box, on Windows or on Mac OS X. It is released to the public domain, | ||||
| which means you can modify it, redistribute it or use it however you like. | ||||
|  | ||||
| # OPTIONS | ||||
|     -h, --help                 print this help text and exit | ||||
|     --version                  print program version and exit | ||||
|     -U, --update               update this program to latest version. Make sure | ||||
|                                that you have sufficient permissions (run with | ||||
|                                sudo if needed) | ||||
|     -i, --ignore-errors        continue on download errors, for example to to | ||||
|                                skip unavailable videos in a playlist | ||||
|     --abort-on-error           Abort downloading of further videos (in the | ||||
|                                playlist or the command line) if an error occurs | ||||
|     --dump-user-agent          display the current browser identification | ||||
|     --user-agent UA            specify a custom user agent | ||||
|     --referer REF              specify a custom referer, use if the video access | ||||
|                                is restricted to one domain | ||||
|     --list-extractors          List all supported extractors and the URLs they | ||||
|                                would handle | ||||
|     --extractor-descriptions   Output descriptions of all supported extractors | ||||
|     --proxy URL                Use the specified HTTP/HTTPS proxy. Pass in an | ||||
|                                empty string (--proxy "") for direct connection | ||||
|     --no-check-certificate     Suppress HTTPS certificate validation. | ||||
|     --cache-dir DIR            Location in the filesystem where youtube-dl can | ||||
|                                store downloaded information permanently. By | ||||
|                                default $XDG_CACHE_HOME/youtube-dl or ~/.cache | ||||
|                                /youtube-dl . | ||||
|     --no-cache-dir             Disable filesystem caching | ||||
|     --bidi-workaround          Work around terminals that lack bidirectional | ||||
|                                text support. Requires bidiv or fribidi | ||||
|                                executable in PATH | ||||
|     -h, --help                       print this help text and exit | ||||
|     --version                        print program version and exit | ||||
|     -U, --update                     update this program to latest version. Make | ||||
|                                      sure that you have sufficient permissions | ||||
|                                      (run with sudo if needed) | ||||
|     -i, --ignore-errors              continue on download errors, for example to | ||||
|                                      skip unavailable videos in a playlist | ||||
|     --abort-on-error                 Abort downloading of further videos (in the | ||||
|                                      playlist or the command line) if an error | ||||
|                                      occurs | ||||
|     --dump-user-agent                display the current browser identification | ||||
|     --user-agent UA                  specify a custom user agent | ||||
|     --referer REF                    specify a custom referer, use if the video | ||||
|                                      access is restricted to one domain | ||||
|     --list-extractors                List all supported extractors and the URLs | ||||
|                                      they would handle | ||||
|     --extractor-descriptions         Output descriptions of all supported | ||||
|                                      extractors | ||||
|     --proxy URL                      Use the specified HTTP/HTTPS proxy. Pass in | ||||
|                                      an empty string (--proxy "") for direct | ||||
|                                      connection | ||||
|     --no-check-certificate           Suppress HTTPS certificate validation. | ||||
|     --prefer-insecure                Use an unencrypted connection to retrieve | ||||
|                                      information about the video. (Currently | ||||
|                                      supported only for YouTube) | ||||
|     --cache-dir DIR                  Location in the filesystem where youtube-dl | ||||
|                                      can store some downloaded information | ||||
|                                      permanently. By default $XDG_CACHE_HOME | ||||
|                                      /youtube-dl or ~/.cache/youtube-dl . At the | ||||
|                                      moment, only YouTube player files (for | ||||
|                                      videos with obfuscated signatures) are | ||||
|                                      cached, but that may change. | ||||
|     --no-cache-dir                   Disable filesystem caching | ||||
|     --socket-timeout None            Time to wait before giving up, in seconds | ||||
|     --bidi-workaround                Work around terminals that lack | ||||
|                                      bidirectional text support. Requires bidiv | ||||
|                                      or fribidi executable in PATH | ||||
|     --default-search PREFIX          Use this prefix for unqualified URLs. For | ||||
|                                      example "gvsearch2:" downloads two videos | ||||
|                                      from google videos for  youtube-dl "large | ||||
|                                      apple". By default (with value "auto") | ||||
|                                      youtube-dl guesses. | ||||
|     --ignore-config                  Do not read configuration files. When given | ||||
|                                      in the global configuration file /etc | ||||
|                                      /youtube-dl.conf: do not read the user | ||||
|                                      configuration in ~/.config/youtube-dl.conf | ||||
|                                      (%APPDATA%/youtube-dl/config.txt on | ||||
|                                      Windows) | ||||
|  | ||||
| ## Video Selection: | ||||
|     --playlist-start NUMBER    playlist video to start at (default is 1) | ||||
|     --playlist-end NUMBER      playlist video to end at (default is last) | ||||
|     --match-title REGEX        download only matching titles (regex or caseless | ||||
|                                sub-string) | ||||
|     --reject-title REGEX       skip download for matching titles (regex or | ||||
|                                caseless sub-string) | ||||
|     --max-downloads NUMBER     Abort after downloading NUMBER files | ||||
|     --min-filesize SIZE        Do not download any videos smaller than SIZE | ||||
|                                (e.g. 50k or 44.6m) | ||||
|     --max-filesize SIZE        Do not download any videos larger than SIZE (e.g. | ||||
|                                50k or 44.6m) | ||||
|     --date DATE                download only videos uploaded in this date | ||||
|     --datebefore DATE          download only videos uploaded before this date | ||||
|     --dateafter DATE           download only videos uploaded after this date | ||||
|     --min-views COUNT          Do not download any videos with less than COUNT | ||||
|                                views | ||||
|     --max-views COUNT          Do not download any videos with more than COUNT | ||||
|                                views | ||||
|     --no-playlist              download only the currently playing video | ||||
|     --age-limit YEARS          download only videos suitable for the given age | ||||
|     --download-archive FILE    Download only videos not listed in the archive | ||||
|                                file. Record the IDs of all downloaded videos in | ||||
|                                it. | ||||
|     --playlist-start NUMBER          playlist video to start at (default is 1) | ||||
|     --playlist-end NUMBER            playlist video to end at (default is last) | ||||
|     --match-title REGEX              download only matching titles (regex or | ||||
|                                      caseless sub-string) | ||||
|     --reject-title REGEX             skip download for matching titles (regex or | ||||
|                                      caseless sub-string) | ||||
|     --max-downloads NUMBER           Abort after downloading NUMBER files | ||||
|     --min-filesize SIZE              Do not download any videos smaller than | ||||
|                                      SIZE (e.g. 50k or 44.6m) | ||||
|     --max-filesize SIZE              Do not download any videos larger than SIZE | ||||
|                                      (e.g. 50k or 44.6m) | ||||
|     --date DATE                      download only videos uploaded in this date | ||||
|     --datebefore DATE                download only videos uploaded on or before | ||||
|                                      this date (i.e. inclusive) | ||||
|     --dateafter DATE                 download only videos uploaded on or after | ||||
|                                      this date (i.e. inclusive) | ||||
|     --min-views COUNT                Do not download any videos with less than | ||||
|                                      COUNT views | ||||
|     --max-views COUNT                Do not download any videos with more than | ||||
|                                      COUNT views | ||||
|     --no-playlist                    download only the currently playing video | ||||
|     --age-limit YEARS                download only videos suitable for the given | ||||
|                                      age | ||||
|     --download-archive FILE          Download only videos not listed in the | ||||
|                                      archive file. Record the IDs of all | ||||
|                                      downloaded videos in it. | ||||
|     --include-ads                    Download advertisements as well | ||||
|                                      (experimental) | ||||
|     --youtube-include-dash-manifest  Try to download the DASH manifest on | ||||
|                                      YouTube videos (experimental) | ||||
|  | ||||
| ## Download Options: | ||||
|     -r, --rate-limit LIMIT     maximum download rate in bytes per second (e.g. | ||||
|                                50K or 4.2M) | ||||
|     -R, --retries RETRIES      number of retries (default is 10) | ||||
|     --buffer-size SIZE         size of download buffer (e.g. 1024 or 16K) | ||||
|                                (default is 1024) | ||||
|     --no-resize-buffer         do not automatically adjust the buffer size. By | ||||
|                                default, the buffer size is automatically resized | ||||
|                                from an initial value of SIZE. | ||||
|     -r, --rate-limit LIMIT           maximum download rate in bytes per second | ||||
|                                      (e.g. 50K or 4.2M) | ||||
|     -R, --retries RETRIES            number of retries (default is 10) | ||||
|     --buffer-size SIZE               size of download buffer (e.g. 1024 or 16K) | ||||
|                                      (default is 1024) | ||||
|     --no-resize-buffer               do not automatically adjust the buffer | ||||
|                                      size. By default, the buffer size is | ||||
|                                      automatically resized from an initial value | ||||
|                                      of SIZE. | ||||
|  | ||||
| ## Filesystem Options: | ||||
|     -t, --title                use title in file name (default) | ||||
|     --id                       use only video ID in file name | ||||
|     -l, --literal              [deprecated] alias of --title | ||||
|     -A, --auto-number          number downloaded files starting from 00000 | ||||
|     -o, --output TEMPLATE      output filename template. Use %(title)s to get | ||||
|                                the title, %(uploader)s for the uploader name, | ||||
|                                %(uploader_id)s for the uploader nickname if | ||||
|                                different, %(autonumber)s to get an automatically | ||||
|                                incremented number, %(ext)s for the filename | ||||
|                                extension, %(format)s for the format description | ||||
|                                (like "22 - 1280x720" or "HD"),%(format_id)s for | ||||
|                                the unique id of the format (like Youtube's | ||||
|                                itags: "137"),%(upload_date)s for the upload date | ||||
|                                (YYYYMMDD), %(extractor)s for the provider | ||||
|                                (youtube, metacafe, etc), %(id)s for the video id | ||||
|                                , %(playlist)s for the playlist the video is in, | ||||
|                                %(playlist_index)s for the position in the | ||||
|                                playlist and %% for a literal percent. Use - to | ||||
|                                output to stdout. Can also be used to download to | ||||
|                                a different directory, for example with -o '/my/d | ||||
|                                ownloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' . | ||||
|     --autonumber-size NUMBER   Specifies the number of digits in %(autonumber)s | ||||
|                                when it is present in output filename template or | ||||
|                                --auto-number option is given | ||||
|     --restrict-filenames       Restrict filenames to only ASCII characters, and | ||||
|                                avoid "&" and spaces in filenames | ||||
|     -a, --batch-file FILE      file containing URLs to download ('-' for stdin) | ||||
|     --load-info FILE           json file containing the video information | ||||
|                                (created with the "--write-json" option | ||||
|     -w, --no-overwrites        do not overwrite files | ||||
|     -c, --continue             force resume of partially downloaded files. By | ||||
|                                default, youtube-dl will resume downloads if | ||||
|                                possible. | ||||
|     --no-continue              do not resume partially downloaded files (restart | ||||
|                                from beginning) | ||||
|     --cookies FILE             file to read cookies from and dump cookie jar in | ||||
|     --no-part                  do not use .part files | ||||
|     --no-mtime                 do not use the Last-modified header to set the | ||||
|                                file modification time | ||||
|     --write-description        write video description to a .description file | ||||
|     --write-info-json          write video metadata to a .info.json file | ||||
|     --write-annotations        write video annotations to a .annotation file | ||||
|     --write-thumbnail          write thumbnail image to disk | ||||
|     -t, --title                      use title in file name (default) | ||||
|     --id                             use only video ID in file name | ||||
|     -l, --literal                    [deprecated] alias of --title | ||||
|     -A, --auto-number                number downloaded files starting from 00000 | ||||
|     -o, --output TEMPLATE            output filename template. Use %(title)s to | ||||
|                                      get the title, %(uploader)s for the | ||||
|                                      uploader name, %(uploader_id)s for the | ||||
|                                      uploader nickname if different, | ||||
|                                      %(autonumber)s to get an automatically | ||||
|                                      incremented number, %(ext)s for the | ||||
|                                      filename extension, %(format)s for the | ||||
|                                      format description (like "22 - 1280x720" or | ||||
|                                      "HD"), %(format_id)s for the unique id of | ||||
|                                      the format (like Youtube's itags: "137"), | ||||
|                                      %(upload_date)s for the upload date | ||||
|                                      (YYYYMMDD), %(extractor)s for the provider | ||||
|                                      (youtube, metacafe, etc), %(id)s for the | ||||
|                                      video id, %(playlist)s for the playlist the | ||||
|                                      video is in, %(playlist_index)s for the | ||||
|                                      position in the playlist and %% for a | ||||
|                                      literal percent. %(height)s and %(width)s | ||||
|                                      for the width and height of the video | ||||
|                                      format. %(resolution)s for a textual | ||||
|                                      description of the resolution of the video | ||||
|                                      format. Use - to output to stdout. Can also | ||||
|                                      be used to download to a different | ||||
|                                      directory, for example with -o '/my/downloa | ||||
|                                      ds/%(uploader)s/%(title)s-%(id)s.%(ext)s' . | ||||
|     --autonumber-size NUMBER         Specifies the number of digits in | ||||
|                                      %(autonumber)s when it is present in output | ||||
|                                      filename template or --auto-number option | ||||
|                                      is given | ||||
|     --restrict-filenames             Restrict filenames to only ASCII | ||||
|                                      characters, and avoid "&" and spaces in | ||||
|                                      filenames | ||||
|     -a, --batch-file FILE            file containing URLs to download ('-' for | ||||
|                                      stdin) | ||||
|     --load-info FILE                 json file containing the video information | ||||
|                                      (created with the "--write-json" option) | ||||
|     -w, --no-overwrites              do not overwrite files | ||||
|     -c, --continue                   force resume of partially downloaded files. | ||||
|                                      By default, youtube-dl will resume | ||||
|                                      downloads if possible. | ||||
|     --no-continue                    do not resume partially downloaded files | ||||
|                                      (restart from beginning) | ||||
|     --cookies FILE                   file to read cookies from and dump cookie | ||||
|                                      jar in | ||||
|     --no-part                        do not use .part files | ||||
|     --no-mtime                       do not use the Last-modified header to set | ||||
|                                      the file modification time | ||||
|     --write-description              write video description to a .description | ||||
|                                      file | ||||
|     --write-info-json                write video metadata to a .info.json file | ||||
|     --write-annotations              write video annotations to a .annotation | ||||
|                                      file | ||||
|     --write-thumbnail                write thumbnail image to disk | ||||
|  | ||||
| ## Verbosity / Simulation Options: | ||||
|     -q, --quiet                activates quiet mode | ||||
|     -s, --simulate             do not download the video and do not write | ||||
|                                anything to disk | ||||
|     --skip-download            do not download the video | ||||
|     -g, --get-url              simulate, quiet but print URL | ||||
|     -e, --get-title            simulate, quiet but print title | ||||
|     --get-id                   simulate, quiet but print id | ||||
|     --get-thumbnail            simulate, quiet but print thumbnail URL | ||||
|     --get-description          simulate, quiet but print video description | ||||
|     --get-duration             simulate, quiet but print video length | ||||
|     --get-filename             simulate, quiet but print output filename | ||||
|     --get-format               simulate, quiet but print output format | ||||
|     -j, --dump-json            simulate, quiet but print JSON information | ||||
|     --newline                  output progress bar as new lines | ||||
|     --no-progress              do not print progress bar | ||||
|     --console-title            display progress in console titlebar | ||||
|     -v, --verbose              print various debugging information | ||||
|     --dump-intermediate-pages  print downloaded pages to debug problems(very | ||||
|                                verbose) | ||||
|     --write-pages              Write downloaded intermediary pages to files in | ||||
|                                the current directory to debug problems | ||||
|     -q, --quiet                      activates quiet mode | ||||
|     -s, --simulate                   do not download the video and do not write | ||||
|                                      anything to disk | ||||
|     --skip-download                  do not download the video | ||||
|     -g, --get-url                    simulate, quiet but print URL | ||||
|     -e, --get-title                  simulate, quiet but print title | ||||
|     --get-id                         simulate, quiet but print id | ||||
|     --get-thumbnail                  simulate, quiet but print thumbnail URL | ||||
|     --get-description                simulate, quiet but print video description | ||||
|     --get-duration                   simulate, quiet but print video length | ||||
|     --get-filename                   simulate, quiet but print output filename | ||||
|     --get-format                     simulate, quiet but print output format | ||||
|     -j, --dump-json                  simulate, quiet but print JSON information | ||||
|     --newline                        output progress bar as new lines | ||||
|     --no-progress                    do not print progress bar | ||||
|     --console-title                  display progress in console titlebar | ||||
|     -v, --verbose                    print various debugging information | ||||
|     --dump-intermediate-pages        print downloaded pages to debug problems | ||||
|                                      (very verbose) | ||||
|     --write-pages                    Write downloaded intermediary pages to | ||||
|                                      files in the current directory to debug | ||||
|                                      problems | ||||
|     --print-traffic                  Display sent and read HTTP traffic | ||||
|  | ||||
| ## Video Format Options: | ||||
|     -f, --format FORMAT        video format code, specify the order of | ||||
|                                preference using slashes: "-f 22/17/18". "-f mp4" | ||||
|                                and "-f flv" are also supported | ||||
|     --all-formats              download all available video formats | ||||
|     --prefer-free-formats      prefer free video formats unless a specific one | ||||
|                                is requested | ||||
|     --max-quality FORMAT       highest quality format to download | ||||
|     -F, --list-formats         list all available formats (currently youtube | ||||
|                                only) | ||||
|     -f, --format FORMAT              video format code, specify the order of | ||||
|                                      preference using slashes: "-f 22/17/18". | ||||
|                                      "-f mp4" and "-f flv" are also supported. | ||||
|                                      You can also use the special names "best", | ||||
|                                      "bestvideo", "bestaudio", "worst", | ||||
|                                      "worstvideo" and "worstaudio". By default, | ||||
|                                      youtube-dl will pick the best quality. | ||||
|     --all-formats                    download all available video formats | ||||
|     --prefer-free-formats            prefer free video formats unless a specific | ||||
|                                      one is requested | ||||
|     --max-quality FORMAT             highest quality format to download | ||||
|     -F, --list-formats               list all available formats | ||||
|  | ||||
| ## Subtitle Options: | ||||
|     --write-sub                write subtitle file | ||||
|     --write-auto-sub           write automatic subtitle file (youtube only) | ||||
|     --all-subs                 downloads all the available subtitles of the | ||||
|                                video | ||||
|     --list-subs                lists all available subtitles for the video | ||||
|     --sub-format FORMAT        subtitle format (default=srt) ([sbv/vtt] youtube | ||||
|                                only) | ||||
|     --sub-lang LANGS           languages of the subtitles to download (optional) | ||||
|                                separated by commas, use IETF language tags like | ||||
|                                'en,pt' | ||||
|     --write-sub                      write subtitle file | ||||
|     --write-auto-sub                 write automatic subtitle file (youtube | ||||
|                                      only) | ||||
|     --all-subs                       downloads all the available subtitles of | ||||
|                                      the video | ||||
|     --list-subs                      lists all available subtitles for the video | ||||
|     --sub-format FORMAT              subtitle format (default=srt) ([sbv/vtt] | ||||
|                                      youtube only) | ||||
|     --sub-lang LANGS                 languages of the subtitles to download | ||||
|                                      (optional) separated by commas, use IETF | ||||
|                                      language tags like 'en,pt' | ||||
|  | ||||
| ## Authentication Options: | ||||
|     -u, --username USERNAME    account username | ||||
|     -p, --password PASSWORD    account password | ||||
|     -n, --netrc                use .netrc authentication data | ||||
|     --video-password PASSWORD  video password (vimeo only) | ||||
|     -u, --username USERNAME          account username | ||||
|     -p, --password PASSWORD          account password | ||||
|     -n, --netrc                      use .netrc authentication data | ||||
|     --video-password PASSWORD        video password (vimeo, smotri) | ||||
|  | ||||
| ## Post-processing Options: | ||||
|     -x, --extract-audio        convert video files to audio-only files (requires | ||||
|                                ffmpeg or avconv and ffprobe or avprobe) | ||||
|     --audio-format FORMAT      "best", "aac", "vorbis", "mp3", "m4a", "opus", or | ||||
|                                "wav"; best by default | ||||
|     --audio-quality QUALITY    ffmpeg/avconv audio quality specification, insert | ||||
|                                a value between 0 (better) and 9 (worse) for VBR | ||||
|                                or a specific bitrate like 128K (default 5) | ||||
|     --recode-video FORMAT      Encode the video to another format if necessary | ||||
|                                (currently supported: mp4|flv|ogg|webm) | ||||
|     -k, --keep-video           keeps the video file on disk after the post- | ||||
|                                processing; the video is erased by default | ||||
|     --no-post-overwrites       do not overwrite post-processed files; the post- | ||||
|                                processed files are overwritten by default | ||||
|     --embed-subs               embed subtitles in the video (only for mp4 | ||||
|                                videos) | ||||
|     --add-metadata             add metadata to the files | ||||
|     -x, --extract-audio              convert video files to audio-only files | ||||
|                                      (requires ffmpeg or avconv and ffprobe or | ||||
|                                      avprobe) | ||||
|     --audio-format FORMAT            "best", "aac", "vorbis", "mp3", "m4a", | ||||
|                                      "opus", or "wav"; best by default | ||||
|     --audio-quality QUALITY          ffmpeg/avconv audio quality specification, | ||||
|                                      insert a value between 0 (better) and 9 | ||||
|                                      (worse) for VBR or a specific bitrate like | ||||
|                                      128K (default 5) | ||||
|     --recode-video FORMAT            Encode the video to another format if | ||||
|                                      necessary (currently supported: | ||||
|                                      mp4|flv|ogg|webm) | ||||
|     -k, --keep-video                 keeps the video file on disk after the | ||||
|                                      post-processing; the video is erased by | ||||
|                                      default | ||||
|     --no-post-overwrites             do not overwrite post-processed files; the | ||||
|                                      post-processed files are overwritten by | ||||
|                                      default | ||||
|     --embed-subs                     embed subtitles in the video (only for mp4 | ||||
|                                      videos) | ||||
|     --add-metadata                   write metadata to the video file | ||||
|     --xattrs                         write metadata to the video file's xattrs | ||||
|                                      (using dublin core and xdg standards) | ||||
|     --prefer-avconv                  Prefer avconv over ffmpeg for running the | ||||
|                                      postprocessors (default) | ||||
|     --prefer-ffmpeg                  Prefer ffmpeg over avconv for running the | ||||
|                                      postprocessors | ||||
|  | ||||
| # CONFIGURATION | ||||
|  | ||||
| You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl.conf`. On Windows, the configuration file locations are `%APPDATA%\youtube-dl\config.txt` and `C:\Users\<Yourname>\youtube-dl.conf`. | ||||
| You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl/config`. On Windows, the configuration file locations are `%APPDATA%\youtube-dl\config.txt` and `C:\Users\<Yourname>\youtube-dl.conf`. | ||||
|  | ||||
| # OUTPUT TEMPLATE | ||||
|  | ||||
| @@ -229,9 +288,14 @@ Videos can be filtered by their upload date using the options `--date`, `--dateb | ||||
|   | ||||
| Examples: | ||||
|  | ||||
| 	$ youtube-dl --dateafter now-6months #will only download the videos uploaded in the last 6 months | ||||
| 	$ youtube-dl --date 19700101 #will only download the videos uploaded in January 1, 1970 | ||||
| 	$ youtube-dl --dateafter 20000101 --datebefore 20100101 #will only download the videos uploaded between 2000 and 2010 | ||||
|     # Download only the videos uploaded in the last 6 months | ||||
|     $ youtube-dl --dateafter now-6months | ||||
|  | ||||
|     # Download only the videos uploaded on January 1, 1970 | ||||
|     $ youtube-dl --date 19700101 | ||||
|  | ||||
|     # Download only the videos uploaded in the 200x decade | ||||
|     $ youtube-dl --dateafter 20000101 --datebefore 20091231 | ||||
|  | ||||
| # FAQ | ||||
|  | ||||
| @@ -276,11 +340,31 @@ Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unz | ||||
|  | ||||
| To run the exe you need to install first the [Microsoft Visual C++ 2008 Redistributable Package](http://www.microsoft.com/en-us/download/details.aspx?id=29). | ||||
|  | ||||
| # COPYRIGHT | ||||
| # DEVELOPER INSTRUCTIONS | ||||
|  | ||||
| youtube-dl is released into the public domain by the copyright holders. | ||||
| Most users do not need to build youtube-dl and can [download the builds](http://rg3.github.io/youtube-dl/download.html) or get them from their distribution. | ||||
|  | ||||
| This README file was originally written by Daniel Bolton (<https://github.com/dbbolton>) and is likewise released into the public domain. | ||||
| To run youtube-dl as a developer, you don't need to build anything either. Simply execute | ||||
|  | ||||
|     python -m youtube_dl | ||||
|  | ||||
| To run the test, simply invoke your favorite test runner, or execute a test file directly; any of the following work: | ||||
|  | ||||
|     python -m unittest discover | ||||
|     python test/test_download.py | ||||
|     nosetests | ||||
|  | ||||
| If you want to create a build of youtube-dl yourself, you'll need | ||||
|  | ||||
| * python | ||||
| * make | ||||
| * pandoc | ||||
| * zip | ||||
| * nosetests | ||||
|  | ||||
| ### Adding support for a new site | ||||
|  | ||||
| If you want to add support for a new site, copy *any* [recently modified](https://github.com/rg3/youtube-dl/commits/master/youtube_dl/extractor) file in `youtube_dl/extractor`, add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py). Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Don't forget to run the tests with `python test/test_download.py TestDownload.test_YourExtractor`! For a detailed tutorial, refer to [this blog post](http://filippo.io/add-support-for-a-new-video-site-to-youtube-dl/). | ||||
|  | ||||
| # BUGS | ||||
|  | ||||
| @@ -310,7 +394,7 @@ Site support requests must contain an example URL. An example URL is a URL you m | ||||
|  | ||||
| ###  Are you using the latest version? | ||||
|  | ||||
| Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. Ábout 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well. | ||||
| Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well. | ||||
|  | ||||
| ###  Is the issue already documented? | ||||
|  | ||||
| @@ -335,3 +419,13 @@ In particular, every site support request issue should only pertain to services | ||||
| ###  Is anyone going to need the feature? | ||||
|  | ||||
| Only post features that you (or an incapacitated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them. | ||||
|  | ||||
| ###  Is your question about youtube-dl? | ||||
|  | ||||
| It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl and relate to a different or even the reporter's own application. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug. | ||||
|  | ||||
| # COPYRIGHT | ||||
|  | ||||
| youtube-dl is released into the public domain by the copyright holders. | ||||
|  | ||||
| This README file was originally written by Daniel Bolton (<https://github.com/dbbolton>) and is likewise released into the public domain. | ||||
|   | ||||
| @@ -6,7 +6,7 @@ __youtube_dl() | ||||
|     prev="${COMP_WORDS[COMP_CWORD-1]}" | ||||
|     opts="{{flags}}" | ||||
|     keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory" | ||||
|     fileopts="-a|--batch-file|--download-archive|--cookies" | ||||
|     fileopts="-a|--batch-file|--download-archive|--cookies|--load-info" | ||||
|     diropts="--cache-dir" | ||||
|  | ||||
|     if [[ ${prev} =~ ${fileopts} ]]; then | ||||
|   | ||||
| @@ -3,6 +3,9 @@ | ||||
| """ | ||||
| This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check | ||||
| if we are not 'age_limit' tagging some porn site | ||||
|  | ||||
| A second approach implemented relies on a list of porn domains, to activate it | ||||
| pass the list filename as the only argument | ||||
| """ | ||||
|  | ||||
| # Allow direct execution | ||||
| @@ -11,25 +14,42 @@ import sys | ||||
| sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | ||||
|  | ||||
| from test.helper import get_testcases | ||||
| from youtube_dl.utils import compat_urllib_parse_urlparse | ||||
| from youtube_dl.utils import compat_urllib_request | ||||
|  | ||||
| if len(sys.argv) > 1: | ||||
|     METHOD = 'LIST' | ||||
|     LIST = open(sys.argv[1]).read().decode('utf8').strip() | ||||
| else: | ||||
|     METHOD = 'EURISTIC' | ||||
|  | ||||
| for test in get_testcases(): | ||||
|     try: | ||||
|         webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read() | ||||
|     except: | ||||
|         print('\nFail: {0}'.format(test['name'])) | ||||
|         continue | ||||
|     if METHOD == 'EURISTIC': | ||||
|         try: | ||||
|             webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read() | ||||
|         except: | ||||
|             print('\nFail: {0}'.format(test['name'])) | ||||
|             continue | ||||
|  | ||||
|     webpage = webpage.decode('utf8', 'replace') | ||||
|         webpage = webpage.decode('utf8', 'replace') | ||||
|  | ||||
|     if 'porn' in webpage.lower() and ('info_dict' not in test | ||||
|                                       or 'age_limit' not in test['info_dict'] | ||||
|                                       or test['info_dict']['age_limit'] != 18): | ||||
|         RESULT = 'porn' in webpage.lower() | ||||
|  | ||||
|     elif METHOD == 'LIST': | ||||
|         domain = compat_urllib_parse_urlparse(test['url']).netloc | ||||
|         if not domain: | ||||
|             print('\nFail: {0}'.format(test['name'])) | ||||
|             continue | ||||
|         domain = '.'.join(domain.split('.')[-2:]) | ||||
|  | ||||
|         RESULT = ('.' + domain + '\n' in LIST or '\n' + domain + '\n' in LIST) | ||||
|  | ||||
|     if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict'] | ||||
|                    or test['info_dict']['age_limit'] != 18): | ||||
|         print('\nPotential missing age_limit check: {0}'.format(test['name'])) | ||||
|  | ||||
|     elif 'porn' not in webpage.lower() and ('info_dict' in test and | ||||
|                                             'age_limit' in test['info_dict'] and | ||||
|                                             test['info_dict']['age_limit'] == 18): | ||||
|     elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict'] | ||||
|                          and test['info_dict']['age_limit'] == 18): | ||||
|         print('\nPotential false negative: {0}'.format(test['name'])) | ||||
|  | ||||
|     else: | ||||
|   | ||||
| @@ -1,56 +1,76 @@ | ||||
| #!/usr/bin/env python3 | ||||
|  | ||||
| import datetime | ||||
|  | ||||
| import io | ||||
| import json | ||||
| import textwrap | ||||
|  | ||||
| import json | ||||
|  | ||||
| atom_template=textwrap.dedent("""\ | ||||
| 								<?xml version='1.0' encoding='utf-8'?> | ||||
| 								<atom:feed xmlns:atom="http://www.w3.org/2005/Atom"> | ||||
| 									<atom:title>youtube-dl releases</atom:title> | ||||
| 									<atom:id>youtube-dl-updates-feed</atom:id> | ||||
| 									<atom:updated>@TIMESTAMP@</atom:updated> | ||||
| 									@ENTRIES@ | ||||
| 								</atom:feed>""") | ||||
| atom_template = textwrap.dedent("""\ | ||||
|     <?xml version="1.0" encoding="utf-8"?> | ||||
|     <feed xmlns="http://www.w3.org/2005/Atom"> | ||||
|         <link rel="self" href="http://rg3.github.io/youtube-dl/update/releases.atom" /> | ||||
|         <title>youtube-dl releases</title> | ||||
|         <id>https://yt-dl.org/feed/youtube-dl-updates-feed</id> | ||||
|         <updated>@TIMESTAMP@</updated> | ||||
|         @ENTRIES@ | ||||
|     </feed>""") | ||||
|  | ||||
| entry_template=textwrap.dedent(""" | ||||
| 								<atom:entry> | ||||
| 									<atom:id>youtube-dl-@VERSION@</atom:id> | ||||
| 									<atom:title>New version @VERSION@</atom:title> | ||||
| 									<atom:link href="http://rg3.github.io/youtube-dl" /> | ||||
| 									<atom:content type="xhtml"> | ||||
| 										<div xmlns="http://www.w3.org/1999/xhtml"> | ||||
| 											Downloads available at <a href="https://yt-dl.org/downloads/@VERSION@/">https://yt-dl.org/downloads/@VERSION@/</a> | ||||
| 										</div> | ||||
| 									</atom:content> | ||||
| 									<atom:author> | ||||
| 										<atom:name>The youtube-dl maintainers</atom:name> | ||||
| 									</atom:author> | ||||
| 									<atom:updated>@TIMESTAMP@</atom:updated> | ||||
| 								</atom:entry> | ||||
| 								""") | ||||
| entry_template = textwrap.dedent(""" | ||||
|     <entry> | ||||
|         <id>https://yt-dl.org/feed/youtube-dl-updates-feed/youtube-dl-@VERSION@</id> | ||||
|         <title>New version @VERSION@</title> | ||||
|         <link href="http://rg3.github.io/youtube-dl" /> | ||||
|         <content type="xhtml"> | ||||
|             <div xmlns="http://www.w3.org/1999/xhtml"> | ||||
|                 Downloads available at <a href="https://yt-dl.org/downloads/@VERSION@/">https://yt-dl.org/downloads/@VERSION@/</a> | ||||
|             </div> | ||||
|         </content> | ||||
|         <author> | ||||
|             <name>The youtube-dl maintainers</name> | ||||
|         </author> | ||||
|         <updated>@TIMESTAMP@</updated> | ||||
|     </entry> | ||||
|     """) | ||||
|  | ||||
| now = datetime.datetime.now() | ||||
| now_iso = now.isoformat() | ||||
| now_iso = now.isoformat() + 'Z' | ||||
|  | ||||
| atom_template = atom_template.replace('@TIMESTAMP@',now_iso) | ||||
|  | ||||
| entries=[] | ||||
| atom_template = atom_template.replace('@TIMESTAMP@', now_iso) | ||||
|  | ||||
| versions_info = json.load(open('update/versions.json')) | ||||
| versions = list(versions_info['versions'].keys()) | ||||
| versions.sort() | ||||
|  | ||||
| entries = [] | ||||
| for v in versions: | ||||
| 	entry = entry_template.replace('@TIMESTAMP@',v.replace('.','-')) | ||||
| 	entry = entry.replace('@VERSION@',v) | ||||
| 	entries.append(entry) | ||||
|     fields = v.split('.') | ||||
|     year, month, day = map(int, fields[:3]) | ||||
|     faked = 0 | ||||
|     patchlevel = 0 | ||||
|     while True: | ||||
|         try: | ||||
|             datetime.date(year, month, day) | ||||
|         except ValueError: | ||||
|             day -= 1 | ||||
|             faked += 1 | ||||
|             assert day > 0 | ||||
|             continue | ||||
|         break | ||||
|     if len(fields) >= 4: | ||||
|         try: | ||||
|             patchlevel = int(fields[3]) | ||||
|         except ValueError: | ||||
|             patchlevel = 1 | ||||
|     timestamp = '%04d-%02d-%02dT00:%02d:%02dZ' % (year, month, day, faked, patchlevel) | ||||
|  | ||||
|     entry = entry_template.replace('@TIMESTAMP@', timestamp) | ||||
|     entry = entry.replace('@VERSION@', v) | ||||
|     entries.append(entry) | ||||
|  | ||||
| entries_str = textwrap.indent(''.join(entries), '\t') | ||||
| atom_template = atom_template.replace('@ENTRIES@', entries_str) | ||||
|  | ||||
| with open('update/releases.atom','w',encoding='utf-8') as atom_file: | ||||
| 	atom_file.write(atom_template) | ||||
| with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file: | ||||
|     atom_file.write(atom_template) | ||||
|  | ||||
|   | ||||
| @@ -1,20 +1,24 @@ | ||||
| import io | ||||
| import sys | ||||
| import re | ||||
|  | ||||
| README_FILE = 'README.md' | ||||
| helptext = sys.stdin.read() | ||||
|  | ||||
| with open(README_FILE) as f: | ||||
| if isinstance(helptext, bytes): | ||||
|     helptext = helptext.decode('utf-8') | ||||
|  | ||||
| with io.open(README_FILE, encoding='utf-8') as f: | ||||
|     oldreadme = f.read() | ||||
|  | ||||
| header = oldreadme[:oldreadme.index('# OPTIONS')] | ||||
| footer = oldreadme[oldreadme.index('# CONFIGURATION'):] | ||||
|  | ||||
| options = helptext[helptext.index('  General Options:')+19:] | ||||
| options = helptext[helptext.index('  General Options:') + 19:] | ||||
| options = re.sub(r'^  (\w.+)$', r'## \1', options, flags=re.M) | ||||
| options = '# OPTIONS\n' + options + '\n' | ||||
|  | ||||
| with open(README_FILE, 'w') as f: | ||||
| with io.open(README_FILE, 'w', encoding='utf-8') as f: | ||||
|     f.write(header) | ||||
|     f.write(options) | ||||
|     f.write(footer) | ||||
|   | ||||
| @@ -14,9 +14,9 @@ | ||||
|  | ||||
| set -e | ||||
|  | ||||
| skip_tests=false | ||||
| if [ "$1" = '--skip-test' ]; then | ||||
|     skip_tests=true | ||||
| skip_tests=true | ||||
| if [ "$1" = '--run-tests' ]; then | ||||
|     skip_tests=false | ||||
|     shift | ||||
| fi | ||||
|  | ||||
| @@ -24,6 +24,8 @@ if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.0 | ||||
| version="$1" | ||||
| if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi | ||||
| if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi | ||||
| useless_files=$(find youtube_dl -type f -not -name '*.py') | ||||
| if [ ! -z "$useless_files" ]; then echo "ERROR: Non-.py files in youtube_dl: $useless_files"; exit 1; fi | ||||
| if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi | ||||
|  | ||||
| /bin/echo -e "\n### First of all, testing..." | ||||
| @@ -68,7 +70,7 @@ RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz" | ||||
| git checkout HEAD -- youtube-dl youtube-dl.exe | ||||
|  | ||||
| /bin/echo -e "\n### Signing and uploading the new binaries to yt-dl.org ..." | ||||
| for f in $RELEASE_FILES; do gpg --detach-sig "build/$version/$f"; done | ||||
| for f in $RELEASE_FILES; do gpg --passphrase-repeat 5 --detach-sig "build/$version/$f"; done | ||||
| scp -r "build/$version" ytdl@yt-dl.org:html/tmp/ | ||||
| ssh ytdl@yt-dl.org "mv html/tmp/$version html/downloads/" | ||||
| ssh ytdl@yt-dl.org "sh html/update_latest.sh $version" | ||||
| @@ -95,7 +97,7 @@ rm -rf build | ||||
|  | ||||
| make pypi-files | ||||
| echo "Uploading to PyPi ..." | ||||
| python setup.py sdist upload | ||||
| python setup.py sdist bdist_wheel upload | ||||
| make clean | ||||
|  | ||||
| /bin/echo -e "\n### DONE!" | ||||
|   | ||||
							
								
								
									
										29
									
								
								setup.py
									
									
									
									
									
								
							
							
						
						
									
										29
									
								
								setup.py
									
									
									
									
									
								
							| @@ -3,7 +3,9 @@ | ||||
|  | ||||
| from __future__ import print_function | ||||
|  | ||||
| import os.path | ||||
| import pkg_resources | ||||
| import warnings | ||||
| import sys | ||||
|  | ||||
| try: | ||||
| @@ -44,12 +46,24 @@ py2exe_params = { | ||||
| if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe': | ||||
|     params = py2exe_params | ||||
| else: | ||||
|     files_spec = [ | ||||
|         ('etc/bash_completion.d', ['youtube-dl.bash-completion']), | ||||
|         ('share/doc/youtube_dl', ['README.txt']), | ||||
|         ('share/man/man1', ['youtube-dl.1']) | ||||
|     ] | ||||
|     root = os.path.dirname(os.path.abspath(__file__)) | ||||
|     data_files = [] | ||||
|     for dirname, files in files_spec: | ||||
|         resfiles = [] | ||||
|         for fn in files: | ||||
|             if not os.path.exists(fn): | ||||
|                 warnings.warn('Skipping file %s since it is not present. Type  make  to build all automatically generated files.' % fn) | ||||
|             else: | ||||
|                 resfiles.append(fn) | ||||
|         data_files.append((dirname, resfiles)) | ||||
|  | ||||
|     params = { | ||||
|         'data_files': [  # Installing system-wide would require sudo... | ||||
|             ('etc/bash_completion.d', ['youtube-dl.bash-completion']), | ||||
|             ('share/doc/youtube_dl', ['README.txt']), | ||||
|             ('share/man/man1', ['youtube-dl.1']) | ||||
|         ] | ||||
|         'data_files': data_files, | ||||
|     } | ||||
|     if setuptools_available: | ||||
|         params['entry_points'] = {'console_scripts': ['youtube-dl = youtube_dl:main']} | ||||
| @@ -71,7 +85,10 @@ setup( | ||||
|     author_email='ytdl@yt-dl.org', | ||||
|     maintainer='Philipp Hagemeister', | ||||
|     maintainer_email='phihag@phihag.de', | ||||
|     packages=['youtube_dl', 'youtube_dl.extractor'], | ||||
|     packages=[ | ||||
|         'youtube_dl', | ||||
|         'youtube_dl.extractor', 'youtube_dl.downloader', | ||||
|         'youtube_dl.postprocessor'], | ||||
|  | ||||
|     # Provokes warning on most systems (why?!) | ||||
|     # test_suite = 'nose.collector', | ||||
|   | ||||
| @@ -71,7 +71,7 @@ class FakeYDL(YoutubeDL): | ||||
|             old_report_warning(message) | ||||
|         self.report_warning = types.MethodType(report_warning, self) | ||||
|  | ||||
| def get_testcases(): | ||||
| def gettestcases(): | ||||
|     for ie in youtube_dl.extractor.gen_extractors(): | ||||
|         t = getattr(ie, '_TEST', None) | ||||
|         if t: | ||||
|   | ||||
							
								
								
									
										44
									
								
								test/test_InfoExtractor.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										44
									
								
								test/test_InfoExtractor.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,44 @@ | ||||
| #!/usr/bin/env python | ||||
|  | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| # Allow direct execution | ||||
| import os | ||||
| import sys | ||||
| import unittest | ||||
| sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | ||||
|  | ||||
| from test.helper import FakeYDL | ||||
| from youtube_dl.extractor.common import InfoExtractor | ||||
| from youtube_dl.extractor import YoutubeIE, get_info_extractor | ||||
|  | ||||
|  | ||||
| class TestIE(InfoExtractor): | ||||
|     pass | ||||
|  | ||||
|  | ||||
| class TestInfoExtractor(unittest.TestCase): | ||||
|     def setUp(self): | ||||
|         self.ie = TestIE(FakeYDL()) | ||||
|  | ||||
|     def test_ie_key(self): | ||||
|         self.assertEqual(get_info_extractor(YoutubeIE.ie_key()), YoutubeIE) | ||||
|  | ||||
|     def test_html_search_regex(self): | ||||
|         html = '<p id="foo">Watch this <a href="http://www.youtube.com/watch?v=BaW_jenozKc">video</a></p>' | ||||
|         search = lambda re, *args: self.ie._html_search_regex(re, html, *args) | ||||
|         self.assertEqual(search(r'<p id="foo">(.+?)</p>', 'foo'), 'Watch this video') | ||||
|  | ||||
|     def test_opengraph(self): | ||||
|         ie = self.ie | ||||
|         html = ''' | ||||
|             <meta name="og:title" content='Foo'/> | ||||
|             <meta content="Some video's description " name="og:description"/> | ||||
|             <meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&key2=val2'/> | ||||
|             ''' | ||||
|         self.assertEqual(ie._og_search_title(html), 'Foo') | ||||
|         self.assertEqual(ie._og_search_description(html), 'Some video\'s description ') | ||||
|         self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2') | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
| @@ -1,5 +1,7 @@ | ||||
| #!/usr/bin/env python | ||||
|  | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| # Allow direct execution | ||||
| import os | ||||
| import sys | ||||
| @@ -8,6 +10,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | ||||
|  | ||||
| from test.helper import FakeYDL | ||||
| from youtube_dl import YoutubeDL | ||||
| from youtube_dl.extractor import YoutubeIE | ||||
|  | ||||
|  | ||||
| class YDL(FakeYDL): | ||||
| @@ -29,105 +32,209 @@ class TestFormatSelection(unittest.TestCase): | ||||
|         ydl = YDL() | ||||
|         ydl.params['prefer_free_formats'] = True | ||||
|         formats = [ | ||||
|             {u'ext': u'webm', u'height': 460}, | ||||
|             {u'ext': u'mp4',  u'height': 460}, | ||||
|             {'ext': 'webm', 'height': 460}, | ||||
|             {'ext': 'mp4',  'height': 460}, | ||||
|         ] | ||||
|         info_dict = {u'formats': formats, u'extractor': u'test'} | ||||
|         info_dict = {'formats': formats, 'extractor': 'test'} | ||||
|         yie = YoutubeIE(ydl) | ||||
|         yie._sort_formats(info_dict['formats']) | ||||
|         ydl.process_ie_result(info_dict) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded[u'ext'], u'webm') | ||||
|         self.assertEqual(downloaded['ext'], 'webm') | ||||
|  | ||||
|         # Different resolution => download best quality (mp4) | ||||
|         ydl = YDL() | ||||
|         ydl.params['prefer_free_formats'] = True | ||||
|         formats = [ | ||||
|             {u'ext': u'webm', u'height': 720}, | ||||
|             {u'ext': u'mp4', u'height': 1080}, | ||||
|             {'ext': 'webm', 'height': 720}, | ||||
|             {'ext': 'mp4', 'height': 1080}, | ||||
|         ] | ||||
|         info_dict[u'formats'] = formats | ||||
|         info_dict['formats'] = formats | ||||
|         yie = YoutubeIE(ydl) | ||||
|         yie._sort_formats(info_dict['formats']) | ||||
|         ydl.process_ie_result(info_dict) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded[u'ext'], u'mp4') | ||||
|         self.assertEqual(downloaded['ext'], 'mp4') | ||||
|  | ||||
|         # No prefer_free_formats => keep original formats order | ||||
|         # No prefer_free_formats => prefer mp4 and flv for greater compatibilty | ||||
|         ydl = YDL() | ||||
|         ydl.params['prefer_free_formats'] = False | ||||
|         formats = [ | ||||
|             {u'ext': u'webm', u'height': 720}, | ||||
|             {u'ext': u'flv', u'height': 720}, | ||||
|             {'ext': 'webm', 'height': 720}, | ||||
|             {'ext': 'mp4', 'height': 720}, | ||||
|             {'ext': 'flv', 'height': 720}, | ||||
|         ] | ||||
|         info_dict[u'formats'] = formats | ||||
|         info_dict['formats'] = formats | ||||
|         yie = YoutubeIE(ydl) | ||||
|         yie._sort_formats(info_dict['formats']) | ||||
|         ydl.process_ie_result(info_dict) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded[u'ext'], u'flv') | ||||
|         self.assertEqual(downloaded['ext'], 'mp4') | ||||
|  | ||||
|         ydl = YDL() | ||||
|         ydl.params['prefer_free_formats'] = False | ||||
|         formats = [ | ||||
|             {'ext': 'flv', 'height': 720}, | ||||
|             {'ext': 'webm', 'height': 720}, | ||||
|         ] | ||||
|         info_dict['formats'] = formats | ||||
|         yie = YoutubeIE(ydl) | ||||
|         yie._sort_formats(info_dict['formats']) | ||||
|         ydl.process_ie_result(info_dict) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded['ext'], 'flv') | ||||
|  | ||||
|     def test_format_limit(self): | ||||
|         formats = [ | ||||
|             {u'format_id': u'meh', u'url': u'http://example.com/meh'}, | ||||
|             {u'format_id': u'good', u'url': u'http://example.com/good'}, | ||||
|             {u'format_id': u'great', u'url': u'http://example.com/great'}, | ||||
|             {u'format_id': u'excellent', u'url': u'http://example.com/exc'}, | ||||
|             {'format_id': 'meh', 'url': 'http://example.com/meh', 'preference': 1}, | ||||
|             {'format_id': 'good', 'url': 'http://example.com/good', 'preference': 2}, | ||||
|             {'format_id': 'great', 'url': 'http://example.com/great', 'preference': 3}, | ||||
|             {'format_id': 'excellent', 'url': 'http://example.com/exc', 'preference': 4}, | ||||
|         ] | ||||
|         info_dict = { | ||||
|             u'formats': formats, u'extractor': u'test', 'id': 'testvid'} | ||||
|             'formats': formats, 'extractor': 'test', 'id': 'testvid'} | ||||
|  | ||||
|         ydl = YDL() | ||||
|         ydl.process_ie_result(info_dict) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded[u'format_id'], u'excellent') | ||||
|         self.assertEqual(downloaded['format_id'], 'excellent') | ||||
|  | ||||
|         ydl = YDL({'format_limit': 'good'}) | ||||
|         assert ydl.params['format_limit'] == 'good' | ||||
|         ydl.process_ie_result(info_dict) | ||||
|         ydl.process_ie_result(info_dict.copy()) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded[u'format_id'], u'good') | ||||
|         self.assertEqual(downloaded['format_id'], 'good') | ||||
|  | ||||
|         ydl = YDL({'format_limit': 'great', 'format': 'all'}) | ||||
|         ydl.process_ie_result(info_dict) | ||||
|         self.assertEqual(ydl.downloaded_info_dicts[0][u'format_id'], u'meh') | ||||
|         self.assertEqual(ydl.downloaded_info_dicts[1][u'format_id'], u'good') | ||||
|         self.assertEqual(ydl.downloaded_info_dicts[2][u'format_id'], u'great') | ||||
|         ydl.process_ie_result(info_dict.copy()) | ||||
|         self.assertEqual(ydl.downloaded_info_dicts[0]['format_id'], 'meh') | ||||
|         self.assertEqual(ydl.downloaded_info_dicts[1]['format_id'], 'good') | ||||
|         self.assertEqual(ydl.downloaded_info_dicts[2]['format_id'], 'great') | ||||
|         self.assertTrue('3' in ydl.msgs[0]) | ||||
|  | ||||
|         ydl = YDL() | ||||
|         ydl.params['format_limit'] = 'excellent' | ||||
|         ydl.process_ie_result(info_dict) | ||||
|         ydl.process_ie_result(info_dict.copy()) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded[u'format_id'], u'excellent') | ||||
|         self.assertEqual(downloaded['format_id'], 'excellent') | ||||
|  | ||||
|     def test_format_selection(self): | ||||
|         formats = [ | ||||
|             {u'format_id': u'35', u'ext': u'mp4'}, | ||||
|             {u'format_id': u'45', u'ext': u'webm'}, | ||||
|             {u'format_id': u'47', u'ext': u'webm'}, | ||||
|             {u'format_id': u'2', u'ext': u'flv'}, | ||||
|             {'format_id': '35', 'ext': 'mp4', 'preference': 1}, | ||||
|             {'format_id': '45', 'ext': 'webm', 'preference': 2}, | ||||
|             {'format_id': '47', 'ext': 'webm', 'preference': 3}, | ||||
|             {'format_id': '2', 'ext': 'flv', 'preference': 4}, | ||||
|         ] | ||||
|         info_dict = {u'formats': formats, u'extractor': u'test'} | ||||
|         info_dict = {'formats': formats, 'extractor': 'test'} | ||||
|  | ||||
|         ydl = YDL({'format': u'20/47'}) | ||||
|         ydl.process_ie_result(info_dict) | ||||
|         ydl = YDL({'format': '20/47'}) | ||||
|         ydl.process_ie_result(info_dict.copy()) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded['format_id'], u'47') | ||||
|         self.assertEqual(downloaded['format_id'], '47') | ||||
|  | ||||
|         ydl = YDL({'format': u'20/71/worst'}) | ||||
|         ydl.process_ie_result(info_dict) | ||||
|         ydl = YDL({'format': '20/71/worst'}) | ||||
|         ydl.process_ie_result(info_dict.copy()) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded['format_id'], u'35') | ||||
|         self.assertEqual(downloaded['format_id'], '35') | ||||
|  | ||||
|         ydl = YDL() | ||||
|         ydl.process_ie_result(info_dict) | ||||
|         ydl.process_ie_result(info_dict.copy()) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded['format_id'], u'2') | ||||
|         self.assertEqual(downloaded['format_id'], '2') | ||||
|  | ||||
|         ydl = YDL({'format': u'webm/mp4'}) | ||||
|         ydl.process_ie_result(info_dict) | ||||
|         ydl = YDL({'format': 'webm/mp4'}) | ||||
|         ydl.process_ie_result(info_dict.copy()) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded['format_id'], u'47') | ||||
|         self.assertEqual(downloaded['format_id'], '47') | ||||
|  | ||||
|         ydl = YDL({'format': u'3gp/40/mp4'}) | ||||
|         ydl.process_ie_result(info_dict) | ||||
|         ydl = YDL({'format': '3gp/40/mp4'}) | ||||
|         ydl.process_ie_result(info_dict.copy()) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded['format_id'], u'35') | ||||
|         self.assertEqual(downloaded['format_id'], '35') | ||||
|  | ||||
|     def test_format_selection_audio(self): | ||||
|         formats = [ | ||||
|             {'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none'}, | ||||
|             {'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none'}, | ||||
|             {'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none'}, | ||||
|             {'format_id': 'vid', 'ext': 'mp4', 'preference': 4}, | ||||
|         ] | ||||
|         info_dict = {'formats': formats, 'extractor': 'test'} | ||||
|  | ||||
|         ydl = YDL({'format': 'bestaudio'}) | ||||
|         ydl.process_ie_result(info_dict.copy()) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded['format_id'], 'audio-high') | ||||
|  | ||||
|         ydl = YDL({'format': 'worstaudio'}) | ||||
|         ydl.process_ie_result(info_dict.copy()) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded['format_id'], 'audio-low') | ||||
|  | ||||
|         formats = [ | ||||
|             {'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1}, | ||||
|             {'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2}, | ||||
|         ] | ||||
|         info_dict = {'formats': formats, 'extractor': 'test'} | ||||
|  | ||||
|         ydl = YDL({'format': 'bestaudio/worstaudio/best'}) | ||||
|         ydl.process_ie_result(info_dict.copy()) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded['format_id'], 'vid-high') | ||||
|  | ||||
|     def test_format_selection_video(self): | ||||
|         formats = [ | ||||
|             {'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none'}, | ||||
|             {'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none'}, | ||||
|             {'format_id': 'vid', 'ext': 'mp4', 'preference': 3}, | ||||
|         ] | ||||
|         info_dict = {'formats': formats, 'extractor': 'test'} | ||||
|  | ||||
|         ydl = YDL({'format': 'bestvideo'}) | ||||
|         ydl.process_ie_result(info_dict.copy()) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded['format_id'], 'dash-video-high') | ||||
|  | ||||
|         ydl = YDL({'format': 'worstvideo'}) | ||||
|         ydl.process_ie_result(info_dict.copy()) | ||||
|         downloaded = ydl.downloaded_info_dicts[0] | ||||
|         self.assertEqual(downloaded['format_id'], 'dash-video-low') | ||||
|  | ||||
|     def test_youtube_format_selection(self): | ||||
|         order = [ | ||||
|             '38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '36', '17', '13', | ||||
|             # Apple HTTP Live Streaming | ||||
|             '96', '95', '94', '93', '92', '132', '151', | ||||
|             # 3D | ||||
|             '85', '84', '102', '83', '101', '82', '100', | ||||
|             # Dash video | ||||
|             '138', '137', '248', '136', '247', '135', '246', | ||||
|             '245', '244', '134', '243', '133', '242', '160', | ||||
|             # Dash audio | ||||
|             '141', '172', '140', '139', '171', | ||||
|         ] | ||||
|  | ||||
|         for f1id, f2id in zip(order, order[1:]): | ||||
|             f1 = YoutubeIE._formats[f1id].copy() | ||||
|             f1['format_id'] = f1id | ||||
|             f2 = YoutubeIE._formats[f2id].copy() | ||||
|             f2['format_id'] = f2id | ||||
|  | ||||
|             info_dict = {'formats': [f1, f2], 'extractor': 'youtube'} | ||||
|             ydl = YDL() | ||||
|             yie = YoutubeIE(ydl) | ||||
|             yie._sort_formats(info_dict['formats']) | ||||
|             ydl.process_ie_result(info_dict) | ||||
|             downloaded = ydl.downloaded_info_dicts[0] | ||||
|             self.assertEqual(downloaded['format_id'], f1id) | ||||
|  | ||||
|             info_dict = {'formats': [f2, f1], 'extractor': 'youtube'} | ||||
|             ydl = YDL() | ||||
|             yie = YoutubeIE(ydl) | ||||
|             yie._sort_formats(info_dict['formats']) | ||||
|             ydl.process_ie_result(info_dict) | ||||
|             downloaded = ydl.downloaded_info_dicts[0] | ||||
|             self.assertEqual(downloaded['format_id'], f1id) | ||||
|  | ||||
|     def test_add_extra_info(self): | ||||
|         test_dict = { | ||||
| @@ -143,17 +250,17 @@ class TestFormatSelection(unittest.TestCase): | ||||
|  | ||||
|     def test_prepare_filename(self): | ||||
|         info = { | ||||
|             u'id': u'1234', | ||||
|             u'ext': u'mp4', | ||||
|             u'width': None, | ||||
|             'id': '1234', | ||||
|             'ext': 'mp4', | ||||
|             'width': None, | ||||
|         } | ||||
|         def fname(templ): | ||||
|             ydl = YoutubeDL({'outtmpl': templ}) | ||||
|             return ydl.prepare_filename(info) | ||||
|         self.assertEqual(fname(u'%(id)s.%(ext)s'), u'1234.mp4') | ||||
|         self.assertEqual(fname(u'%(id)s-%(width)s.%(ext)s'), u'1234-NA.mp4') | ||||
|         self.assertEqual(fname('%(id)s.%(ext)s'), '1234.mp4') | ||||
|         self.assertEqual(fname('%(id)s-%(width)s.%(ext)s'), '1234-NA.mp4') | ||||
|         # Replace missing fields with 'NA' | ||||
|         self.assertEqual(fname(u'%(uploader_date)s-%(id)s.%(ext)s'), u'NA-1234.mp4') | ||||
|         self.assertEqual(fname('%(uploader_date)s-%(id)s.%(ext)s'), 'NA-1234.mp4') | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|   | ||||
| @@ -1,5 +1,7 @@ | ||||
| #!/usr/bin/env python | ||||
|  | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| # Allow direct execution | ||||
| import os | ||||
| import sys | ||||
| @@ -7,12 +9,13 @@ import unittest | ||||
| sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | ||||
|  | ||||
|  | ||||
| from test.helper import get_testcases | ||||
| from test.helper import gettestcases | ||||
|  | ||||
| from youtube_dl.extractor import ( | ||||
|     FacebookIE, | ||||
|     gen_extractors, | ||||
|     JustinTVIE, | ||||
|     PBSIE, | ||||
|     YoutubeIE, | ||||
| ) | ||||
|  | ||||
| @@ -29,18 +32,20 @@ class TestAllURLsMatching(unittest.TestCase): | ||||
|  | ||||
|     def test_youtube_playlist_matching(self): | ||||
|         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist']) | ||||
|         assertPlaylist(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') | ||||
|         assertPlaylist(u'UUBABnxM4Ar9ten8Mdjj1j0Q') #585 | ||||
|         assertPlaylist(u'PL63F0C78739B09958') | ||||
|         assertPlaylist(u'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q') | ||||
|         assertPlaylist(u'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') | ||||
|         assertPlaylist(u'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC') | ||||
|         assertPlaylist(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668 | ||||
|         self.assertFalse('youtube:playlist' in self.matching_ies(u'PLtS2H6bU1M')) | ||||
|         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') | ||||
|         assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585 | ||||
|         assertPlaylist('PL63F0C78739B09958') | ||||
|         assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q') | ||||
|         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') | ||||
|         assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC') | ||||
|         assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668 | ||||
|         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M')) | ||||
|         # Top tracks | ||||
|         assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101') | ||||
|  | ||||
|     def test_youtube_matching(self): | ||||
|         self.assertTrue(YoutubeIE.suitable(u'PLtS2H6bU1M')) | ||||
|         self.assertFalse(YoutubeIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668 | ||||
|         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M')) | ||||
|         self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668 | ||||
|         self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube']) | ||||
|         self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube']) | ||||
|         self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube']) | ||||
| @@ -63,6 +68,13 @@ class TestAllURLsMatching(unittest.TestCase): | ||||
|     def test_youtube_show_matching(self): | ||||
|         self.assertMatch('http://www.youtube.com/show/airdisasters', ['youtube:show']) | ||||
|  | ||||
|     def test_youtube_truncated(self): | ||||
|         self.assertMatch('http://www.youtube.com/watch?', ['youtube:truncated_url']) | ||||
|  | ||||
|     def test_youtube_search_matching(self): | ||||
|         self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url']) | ||||
|         self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url']) | ||||
|  | ||||
|     def test_justin_tv_channelid_matching(self): | ||||
|         self.assertTrue(JustinTVIE.suitable(u"justin.tv/vanillatv")) | ||||
|         self.assertTrue(JustinTVIE.suitable(u"twitch.tv/vanillatv")) | ||||
| @@ -80,7 +92,7 @@ class TestAllURLsMatching(unittest.TestCase): | ||||
|         self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/tsm_theoddone/c/2349361")) | ||||
|  | ||||
|     def test_youtube_extract(self): | ||||
|         assertExtractId = lambda url, id: self.assertEqual(YoutubeIE()._extract_id(url), id) | ||||
|         assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id) | ||||
|         assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc') | ||||
|         assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc') | ||||
|         assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc') | ||||
| @@ -89,11 +101,11 @@ class TestAllURLsMatching(unittest.TestCase): | ||||
|         assertExtractId('BaW_jenozKc', 'BaW_jenozKc') | ||||
|  | ||||
|     def test_facebook_matching(self): | ||||
|         self.assertTrue(FacebookIE.suitable(u'https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268')) | ||||
|         self.assertTrue(FacebookIE.suitable('https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268')) | ||||
|  | ||||
|     def test_no_duplicates(self): | ||||
|         ies = gen_extractors() | ||||
|         for tc in get_testcases(): | ||||
|         for tc in gettestcases(): | ||||
|             url = tc['url'] | ||||
|             for ie in ies: | ||||
|                 if type(ie).__name__ in ('GenericIE', tc['name'] + 'IE'): | ||||
| @@ -112,11 +124,24 @@ class TestAllURLsMatching(unittest.TestCase): | ||||
|  | ||||
|     def test_vimeo_matching(self): | ||||
|         self.assertMatch('http://vimeo.com/channels/tributes', ['vimeo:channel']) | ||||
|         self.assertMatch('http://vimeo.com/channels/31259', ['vimeo:channel']) | ||||
|         self.assertMatch('http://vimeo.com/channels/31259/53576664', ['vimeo']) | ||||
|         self.assertMatch('http://vimeo.com/user7108434', ['vimeo:user']) | ||||
|         self.assertMatch('http://vimeo.com/user7108434/videos', ['vimeo:user']) | ||||
|         self.assertMatch('https://vimeo.com/user21297594/review/75524534/3c257a1b5d', ['vimeo:review']) | ||||
|  | ||||
|     # https://github.com/rg3/youtube-dl/issues/1930 | ||||
|     def test_soundcloud_not_matching_sets(self): | ||||
|         self.assertMatch('http://soundcloud.com/floex/sets/gone-ep', ['soundcloud:set']) | ||||
|  | ||||
|     def test_tumblr(self): | ||||
|         self.assertMatch('http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes', ['Tumblr']) | ||||
|         self.assertMatch('http://tatianamaslanydaily.tumblr.com/post/54196191430', ['Tumblr']) | ||||
|  | ||||
|     def test_pbs(self): | ||||
|         # https://github.com/rg3/youtube-dl/issues/2350 | ||||
|         self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['PBS']) | ||||
|         self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['PBS']) | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
|   | ||||
| @@ -8,7 +8,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | ||||
|  | ||||
| from test.helper import ( | ||||
|     get_params, | ||||
|     get_testcases, | ||||
|     gettestcases, | ||||
|     try_rm, | ||||
|     md5, | ||||
|     report_warning | ||||
| @@ -18,10 +18,12 @@ from test.helper import ( | ||||
| import hashlib | ||||
| import io | ||||
| import json | ||||
| import re | ||||
| import socket | ||||
|  | ||||
| import youtube_dl.YoutubeDL | ||||
| from youtube_dl.utils import ( | ||||
|     compat_http_client, | ||||
|     compat_str, | ||||
|     compat_urllib_error, | ||||
|     compat_HTTPError, | ||||
| @@ -49,7 +51,7 @@ def _file_md5(fn): | ||||
|     with open(fn, 'rb') as f: | ||||
|         return hashlib.md5(f.read()).hexdigest() | ||||
|  | ||||
| defs = get_testcases() | ||||
| defs = gettestcases() | ||||
|  | ||||
|  | ||||
| class TestDownload(unittest.TestCase): | ||||
| @@ -71,9 +73,7 @@ def generator(test_case): | ||||
|         if 'playlist' not in test_case: | ||||
|             info_dict = test_case.get('info_dict', {}) | ||||
|             if not test_case.get('file') and not (info_dict.get('id') and info_dict.get('ext')): | ||||
|                 print_skipping('The output file cannot be know, the "file" ' | ||||
|                     'key is missing or the info_dict is incomplete') | ||||
|                 return | ||||
|                 raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?') | ||||
|         if 'skip' in test_case: | ||||
|             print_skipping(test_case['skip']) | ||||
|             return | ||||
| @@ -90,7 +90,7 @@ def generator(test_case): | ||||
|         def _hook(status): | ||||
|             if status['status'] == 'finished': | ||||
|                 finished_hook_called.add(status['filename']) | ||||
|         ydl.fd.add_progress_hook(_hook) | ||||
|         ydl.add_progress_hook(_hook) | ||||
|  | ||||
|         def get_tc_filename(tc): | ||||
|             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {})) | ||||
| @@ -110,7 +110,7 @@ def generator(test_case): | ||||
|                     ydl.download([test_case['url']]) | ||||
|                 except (DownloadError, ExtractorError) as err: | ||||
|                     # Check if the exception is not a network related one | ||||
|                     if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503): | ||||
|                     if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503): | ||||
|                         raise | ||||
|  | ||||
|                     if try_num == RETRIES: | ||||
| @@ -136,19 +136,25 @@ def generator(test_case): | ||||
|                 with io.open(info_json_fn, encoding='utf-8') as infof: | ||||
|                     info_dict = json.load(infof) | ||||
|                 for (info_field, expected) in tc.get('info_dict', {}).items(): | ||||
|                     if isinstance(expected, compat_str) and expected.startswith('md5:'): | ||||
|                         got = 'md5:' + md5(info_dict.get(info_field)) | ||||
|                     else: | ||||
|                     if isinstance(expected, compat_str) and expected.startswith('re:'): | ||||
|                         got = info_dict.get(info_field) | ||||
|                     self.assertEqual(expected, got, | ||||
|                         u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got)) | ||||
|                         match_str = expected[len('re:'):] | ||||
|                         match_rex = re.compile(match_str) | ||||
|  | ||||
|                 # If checkable fields are missing from the test case, print the info_dict | ||||
|                 test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value)) | ||||
|                     for key, value in info_dict.items() | ||||
|                     if value and key in ('title', 'description', 'uploader', 'upload_date', 'uploader_id', 'location')) | ||||
|                 if not all(key in tc.get('info_dict', {}).keys() for key in test_info_dict.keys()): | ||||
|                     sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=2) + u'\n') | ||||
|                         self.assertTrue( | ||||
|                             isinstance(got, compat_str) and match_rex.match(got), | ||||
|                             u'field %s (value: %r) should match %r' % (info_field, got, match_str)) | ||||
|                     elif isinstance(expected, type): | ||||
|                         got = info_dict.get(info_field) | ||||
|                         self.assertTrue(isinstance(got, expected), | ||||
|                             u'Expected type %r, but got value %r of type %r' % (expected, got, type(got))) | ||||
|                     else: | ||||
|                         if isinstance(expected, compat_str) and expected.startswith('md5:'): | ||||
|                             got = 'md5:' + md5(info_dict.get(info_field)) | ||||
|                         else: | ||||
|                             got = info_dict.get(info_field) | ||||
|                         self.assertEqual(expected, got, | ||||
|                             u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got)) | ||||
|  | ||||
|                 # Check for the presence of mandatory fields | ||||
|                 for key in ('id', 'url', 'title', 'ext'): | ||||
| @@ -156,6 +162,13 @@ def generator(test_case): | ||||
|                 # Check for mandatory fields that are automatically set by YoutubeDL | ||||
|                 for key in ['webpage_url', 'extractor', 'extractor_key']: | ||||
|                     self.assertTrue(info_dict.get(key), u'Missing field: %s' % key) | ||||
|  | ||||
|                 # If checkable fields are missing from the test case, print the info_dict | ||||
|                 test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value)) | ||||
|                     for key, value in info_dict.items() | ||||
|                     if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location')) | ||||
|                 if not all(key in tc.get('info_dict', {}).keys() for key in test_info_dict.keys()): | ||||
|                     sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n') | ||||
|         finally: | ||||
|             try_rm_tcs_files() | ||||
|  | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| #!/usr/bin/env python | ||||
| # encoding: utf-8 | ||||
|  | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| # Allow direct execution | ||||
| import os | ||||
| @@ -28,7 +29,14 @@ from youtube_dl.extractor import ( | ||||
|     BandcampAlbumIE, | ||||
|     SmotriCommunityIE, | ||||
|     SmotriUserIE, | ||||
|     IviCompilationIE | ||||
|     IviCompilationIE, | ||||
|     ImdbListIE, | ||||
|     KhanAcademyIE, | ||||
|     EveryonesMixtapeIE, | ||||
|     RutubeChannelIE, | ||||
|     GoogleSearchIE, | ||||
|     GenericIE, | ||||
|     TEDIE, | ||||
| ) | ||||
|  | ||||
|  | ||||
| @@ -42,23 +50,23 @@ class TestPlaylists(unittest.TestCase): | ||||
|         ie = DailymotionPlaylistIE(dl) | ||||
|         result = ie.extract('http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['title'], u'SPORT') | ||||
|         self.assertEqual(result['title'], 'SPORT') | ||||
|         self.assertTrue(len(result['entries']) > 20) | ||||
|  | ||||
|     def test_dailymotion_user(self): | ||||
|         dl = FakeYDL() | ||||
|         ie = DailymotionUserIE(dl) | ||||
|         result = ie.extract('http://www.dailymotion.com/user/generation-quoi/') | ||||
|         result = ie.extract('https://www.dailymotion.com/user/nqtv') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['title'], u'Génération Quoi') | ||||
|         self.assertTrue(len(result['entries']) >= 26) | ||||
|         self.assertEqual(result['title'], 'Rémi Gaillard') | ||||
|         self.assertTrue(len(result['entries']) >= 100) | ||||
|  | ||||
|     def test_vimeo_channel(self): | ||||
|         dl = FakeYDL() | ||||
|         ie = VimeoChannelIE(dl) | ||||
|         result = ie.extract('http://vimeo.com/channels/tributes') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['title'], u'Vimeo Tributes') | ||||
|         self.assertEqual(result['title'], 'Vimeo Tributes') | ||||
|         self.assertTrue(len(result['entries']) > 24) | ||||
|  | ||||
|     def test_vimeo_user(self): | ||||
| @@ -66,7 +74,7 @@ class TestPlaylists(unittest.TestCase): | ||||
|         ie = VimeoUserIE(dl) | ||||
|         result = ie.extract('http://vimeo.com/nkistudio/videos') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['title'], u'Nki') | ||||
|         self.assertEqual(result['title'], 'Nki') | ||||
|         self.assertTrue(len(result['entries']) > 65) | ||||
|  | ||||
|     def test_vimeo_album(self): | ||||
| @@ -74,7 +82,7 @@ class TestPlaylists(unittest.TestCase): | ||||
|         ie = VimeoAlbumIE(dl) | ||||
|         result = ie.extract('http://vimeo.com/album/2632481') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['title'], u'Staff Favorites: November 2013') | ||||
|         self.assertEqual(result['title'], 'Staff Favorites: November 2013') | ||||
|         self.assertTrue(len(result['entries']) > 12) | ||||
|  | ||||
|     def test_vimeo_groups(self): | ||||
| @@ -82,7 +90,7 @@ class TestPlaylists(unittest.TestCase): | ||||
|         ie = VimeoGroupsIE(dl) | ||||
|         result = ie.extract('http://vimeo.com/groups/rolexawards') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['title'], u'Rolex Awards for Enterprise') | ||||
|         self.assertEqual(result['title'], 'Rolex Awards for Enterprise') | ||||
|         self.assertTrue(len(result['entries']) > 72) | ||||
|  | ||||
|     def test_ustream_channel(self): | ||||
| @@ -90,15 +98,15 @@ class TestPlaylists(unittest.TestCase): | ||||
|         ie = UstreamChannelIE(dl) | ||||
|         result = ie.extract('http://www.ustream.tv/channel/young-americans-for-liberty') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], u'5124905') | ||||
|         self.assertTrue(len(result['entries']) >= 11) | ||||
|         self.assertEqual(result['id'], '5124905') | ||||
|         self.assertTrue(len(result['entries']) >= 6) | ||||
|  | ||||
|     def test_soundcloud_set(self): | ||||
|         dl = FakeYDL() | ||||
|         ie = SoundcloudSetIE(dl) | ||||
|         result = ie.extract('https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['title'], u'The Royal Concept EP') | ||||
|         self.assertEqual(result['title'], 'The Royal Concept EP') | ||||
|         self.assertTrue(len(result['entries']) >= 6) | ||||
|  | ||||
|     def test_soundcloud_user(self): | ||||
| @@ -106,7 +114,7 @@ class TestPlaylists(unittest.TestCase): | ||||
|         ie = SoundcloudUserIE(dl) | ||||
|         result = ie.extract('https://soundcloud.com/the-concept-band') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], u'9615865') | ||||
|         self.assertEqual(result['id'], '9615865') | ||||
|         self.assertTrue(len(result['entries']) >= 12) | ||||
|  | ||||
|     def test_livestream_event(self): | ||||
| @@ -114,7 +122,7 @@ class TestPlaylists(unittest.TestCase): | ||||
|         ie = LivestreamIE(dl) | ||||
|         result = ie.extract('http://new.livestream.com/tedx/cityenglish') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['title'], u'TEDCity2.0 (English)') | ||||
|         self.assertEqual(result['title'], 'TEDCity2.0 (English)') | ||||
|         self.assertTrue(len(result['entries']) >= 4) | ||||
|  | ||||
|     def test_nhl_videocenter(self): | ||||
| @@ -122,8 +130,8 @@ class TestPlaylists(unittest.TestCase): | ||||
|         ie = NHLVideocenterIE(dl) | ||||
|         result = ie.extract('http://video.canucks.nhl.com/videocenter/console?catid=999') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], u'999') | ||||
|         self.assertEqual(result['title'], u'Highlights') | ||||
|         self.assertEqual(result['id'], '999') | ||||
|         self.assertEqual(result['title'], 'Highlights') | ||||
|         self.assertEqual(len(result['entries']), 12) | ||||
|  | ||||
|     def test_bambuser_channel(self): | ||||
| @@ -131,7 +139,7 @@ class TestPlaylists(unittest.TestCase): | ||||
|         ie = BambuserChannelIE(dl) | ||||
|         result = ie.extract('http://bambuser.com/channel/pixelversity') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['title'], u'pixelversity') | ||||
|         self.assertEqual(result['title'], 'pixelversity') | ||||
|         self.assertTrue(len(result['entries']) >= 60) | ||||
|  | ||||
|     def test_bandcamp_album(self): | ||||
| @@ -139,7 +147,7 @@ class TestPlaylists(unittest.TestCase): | ||||
|         ie = BandcampAlbumIE(dl) | ||||
|         result = ie.extract('http://mpallante.bandcamp.com/album/nightmare-night-ep') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['title'], u'Nightmare Night EP') | ||||
|         self.assertEqual(result['title'], 'Nightmare Night EP') | ||||
|         self.assertTrue(len(result['entries']) >= 4) | ||||
|          | ||||
|     def test_smotri_community(self): | ||||
| @@ -147,8 +155,8 @@ class TestPlaylists(unittest.TestCase): | ||||
|         ie = SmotriCommunityIE(dl) | ||||
|         result = ie.extract('http://smotri.com/community/video/kommuna') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], u'kommuna') | ||||
|         self.assertEqual(result['title'], u'КПРФ') | ||||
|         self.assertEqual(result['id'], 'kommuna') | ||||
|         self.assertEqual(result['title'], 'КПРФ') | ||||
|         self.assertTrue(len(result['entries']) >= 4) | ||||
|          | ||||
|     def test_smotri_user(self): | ||||
| @@ -156,27 +164,27 @@ class TestPlaylists(unittest.TestCase): | ||||
|         ie = SmotriUserIE(dl) | ||||
|         result = ie.extract('http://smotri.com/user/inspector') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], u'inspector') | ||||
|         self.assertEqual(result['title'], u'Inspector') | ||||
|         self.assertEqual(result['id'], 'inspector') | ||||
|         self.assertEqual(result['title'], 'Inspector') | ||||
|         self.assertTrue(len(result['entries']) >= 9) | ||||
|  | ||||
|     def test_AcademicEarthCourse(self): | ||||
|         dl = FakeYDL() | ||||
|         ie = AcademicEarthCourseIE(dl) | ||||
|         result = ie.extract(u'http://academicearth.org/courses/building-dynamic-websites/') | ||||
|         result = ie.extract('http://academicearth.org/playlists/laws-of-nature/') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], u'building-dynamic-websites') | ||||
|         self.assertEqual(result['title'], u'Building Dynamic Websites') | ||||
|         self.assertEqual(result['description'], u"Today's websites are increasingly dynamic. Pages are no longer static HTML files but instead generated by scripts and database calls. User interfaces are more seamless, with technologies like Ajax replacing traditional page reloads. This course teaches students how to build dynamic websites with Ajax and with Linux, Apache, MySQL, and PHP (LAMP), one of today's most popular frameworks. Students learn how to set up domain names with DNS, how to structure pages with XHTML and CSS, how to program in JavaScript and PHP, how to configure Apache and MySQL, how to design and query databases with SQL, how to use Ajax with both XML and JSON, and how to build mashups. The course explores issues of security, scalability, and cross-browser support and also discusses enterprise-level deployments of websites, including third-party hosting, virtualization, colocation in data centers, firewalling, and load-balancing.") | ||||
|         self.assertEqual(len(result['entries']), 10) | ||||
|         self.assertEqual(result['id'], 'laws-of-nature') | ||||
|         self.assertEqual(result['title'], 'Laws of Nature') | ||||
|         self.assertEqual(result['description'],u'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.')# u"Today's websites are increasingly dynamic. Pages are no longer static HTML files but instead generated by scripts and database calls. User interfaces are more seamless, with technologies like Ajax replacing traditional page reloads. This course teaches students how to build dynamic websites with Ajax and with Linux, Apache, MySQL, and PHP (LAMP), one of today's most popular frameworks. Students learn how to set up domain names with DNS, how to structure pages with XHTML and CSS, how to program in JavaScript and PHP, how to configure Apache and MySQL, how to design and query databases with SQL, how to use Ajax with both XML and JSON, and how to build mashups. The course explores issues of security, scalability, and cross-browser support and also discusses enterprise-level deployments of websites, including third-party hosting, virtualization, colocation in data centers, firewalling, and load-balancing.") | ||||
|         self.assertEqual(len(result['entries']), 4) | ||||
|          | ||||
|     def test_ivi_compilation(self): | ||||
|         dl = FakeYDL() | ||||
|         ie = IviCompilationIE(dl) | ||||
|         result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], u'dezhurnyi_angel') | ||||
|         self.assertEqual(result['title'], u'Дежурный ангел (2010 - 2012)') | ||||
|         self.assertEqual(result['id'], 'dezhurnyi_angel') | ||||
|         self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012)') | ||||
|         self.assertTrue(len(result['entries']) >= 36) | ||||
|          | ||||
|     def test_ivi_compilation_season(self): | ||||
| @@ -184,10 +192,82 @@ class TestPlaylists(unittest.TestCase): | ||||
|         ie = IviCompilationIE(dl) | ||||
|         result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel/season2') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], u'dezhurnyi_angel/season2') | ||||
|         self.assertEqual(result['title'], u'Дежурный ангел (2010 - 2012) 2 сезон') | ||||
|         self.assertEqual(result['id'], 'dezhurnyi_angel/season2') | ||||
|         self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012) 2 сезон') | ||||
|         self.assertTrue(len(result['entries']) >= 20) | ||||
|          | ||||
|     def test_imdb_list(self): | ||||
|         dl = FakeYDL() | ||||
|         ie = ImdbListIE(dl) | ||||
|         result = ie.extract('http://www.imdb.com/list/JFs9NWw6XI0') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], 'JFs9NWw6XI0') | ||||
|         self.assertEqual(result['title'], 'March 23, 2012 Releases') | ||||
|         self.assertEqual(len(result['entries']), 7) | ||||
|  | ||||
|     def test_khanacademy_topic(self): | ||||
|         dl = FakeYDL() | ||||
|         ie = KhanAcademyIE(dl) | ||||
|         result = ie.extract('https://www.khanacademy.org/math/applied-math/cryptography') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], 'cryptography') | ||||
|         self.assertEqual(result['title'], 'Journey into cryptography') | ||||
|         self.assertEqual(result['description'], 'How have humans protected their secret messages through history? What has changed today?') | ||||
|         self.assertTrue(len(result['entries']) >= 3) | ||||
|  | ||||
|     def test_EveryonesMixtape(self): | ||||
|         dl = FakeYDL() | ||||
|         ie = EveryonesMixtapeIE(dl) | ||||
|         result = ie.extract('http://everyonesmixtape.com/#/mix/m7m0jJAbMQi') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], 'm7m0jJAbMQi') | ||||
|         self.assertEqual(result['title'], 'Driving') | ||||
|         self.assertEqual(len(result['entries']), 24) | ||||
|          | ||||
|     def test_rutube_channel(self): | ||||
|         dl = FakeYDL() | ||||
|         ie = RutubeChannelIE(dl) | ||||
|         result = ie.extract('http://rutube.ru/tags/video/1409') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], '1409') | ||||
|         self.assertTrue(len(result['entries']) >= 34) | ||||
|  | ||||
|     def test_multiple_brightcove_videos(self): | ||||
|         # https://github.com/rg3/youtube-dl/issues/2283 | ||||
|         dl = FakeYDL() | ||||
|         ie = GenericIE(dl) | ||||
|         result = ie.extract('http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], 'always-never-nuclear-command-and-control') | ||||
|         self.assertEqual(result['title'], 'Always/Never: A Little-Seen Movie About Nuclear Command and Control : The New Yorker') | ||||
|         self.assertEqual(len(result['entries']), 3) | ||||
|  | ||||
|     def test_GoogleSearch(self): | ||||
|         dl = FakeYDL() | ||||
|         ie = GoogleSearchIE(dl) | ||||
|         result = ie.extract('gvsearch15:python language') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], 'python language') | ||||
|         self.assertEqual(result['title'], 'python language') | ||||
|         self.assertEqual(len(result['entries']), 15) | ||||
|  | ||||
|     def test_generic_rss_feed(self): | ||||
|         dl = FakeYDL() | ||||
|         ie = GenericIE(dl) | ||||
|         result = ie.extract('http://phihag.de/2014/youtube-dl/rss.xml') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], 'http://phihag.de/2014/youtube-dl/rss.xml') | ||||
|         self.assertEqual(result['title'], 'Zero Punctuation') | ||||
|         self.assertTrue(len(result['entries']) > 10) | ||||
|  | ||||
|     def test_ted_playlist(self): | ||||
|         dl = FakeYDL() | ||||
|         ie = TEDIE(dl) | ||||
|         result = ie.extract('http://www.ted.com/playlists/who_are_the_hackers') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['id'], '10') | ||||
|         self.assertEqual(result['title'], 'Who are the hackers?') | ||||
|         self.assertTrue(len(result['entries']) >= 6) | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
|   | ||||
| @@ -10,9 +10,11 @@ from test.helper import FakeYDL, md5 | ||||
|  | ||||
|  | ||||
| from youtube_dl.extractor import ( | ||||
|     BlipTVIE, | ||||
|     YoutubeIE, | ||||
|     DailymotionIE, | ||||
|     TEDIE, | ||||
|     VimeoIE, | ||||
| ) | ||||
|  | ||||
|  | ||||
| @@ -36,10 +38,6 @@ class TestYoutubeSubtitles(BaseTestSubtitles): | ||||
|     url = 'QRS8MkLhQmM' | ||||
|     IE = YoutubeIE | ||||
|  | ||||
|     def getSubtitles(self): | ||||
|         info_dict = self.getInfoDict() | ||||
|         return info_dict[0]['subtitles'] | ||||
|  | ||||
|     def test_youtube_no_writesubtitles(self): | ||||
|         self.DL.params['writesubtitles'] = False | ||||
|         subtitles = self.getSubtitles() | ||||
| @@ -171,13 +169,13 @@ class TestTedSubtitles(BaseTestSubtitles): | ||||
|     def test_subtitles(self): | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         subtitles = self.getSubtitles() | ||||
|         self.assertEqual(md5(subtitles['en']), '2154f31ff9b9f89a0aa671537559c21d') | ||||
|         self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14') | ||||
|  | ||||
|     def test_subtitles_lang(self): | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         self.DL.params['subtitleslangs'] = ['fr'] | ||||
|         subtitles = self.getSubtitles() | ||||
|         self.assertEqual(md5(subtitles['fr']), '7616cbc6df20ec2c1204083c83871cf6') | ||||
|         self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5') | ||||
|  | ||||
|     def test_allsubtitles(self): | ||||
|         self.DL.params['writesubtitles'] = True | ||||
| @@ -206,5 +204,80 @@ class TestTedSubtitles(BaseTestSubtitles): | ||||
|         for lang in langs: | ||||
|             self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang) | ||||
|  | ||||
|  | ||||
| class TestBlipTVSubtitles(BaseTestSubtitles): | ||||
|     url = 'http://blip.tv/a/a-6603250' | ||||
|     IE = BlipTVIE | ||||
|  | ||||
|     def test_list_subtitles(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.params['listsubtitles'] = True | ||||
|         info_dict = self.getInfoDict() | ||||
|         self.assertEqual(info_dict, None) | ||||
|  | ||||
|     def test_allsubtitles(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         self.DL.params['allsubtitles'] = True | ||||
|         subtitles = self.getSubtitles() | ||||
|         self.assertEqual(set(subtitles.keys()), set(['en'])) | ||||
|         self.assertEqual(md5(subtitles['en']), '5b75c300af65fe4476dff79478bb93e4') | ||||
|  | ||||
|  | ||||
| class TestVimeoSubtitles(BaseTestSubtitles): | ||||
|     url = 'http://vimeo.com/76979871' | ||||
|     IE = VimeoIE | ||||
|  | ||||
|     def test_no_writesubtitles(self): | ||||
|         subtitles = self.getSubtitles() | ||||
|         self.assertEqual(subtitles, None) | ||||
|  | ||||
|     def test_subtitles(self): | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         subtitles = self.getSubtitles() | ||||
|         self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888') | ||||
|  | ||||
|     def test_subtitles_lang(self): | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         self.DL.params['subtitleslangs'] = ['fr'] | ||||
|         subtitles = self.getSubtitles() | ||||
|         self.assertEqual(md5(subtitles['fr']), 'b6191146a6c5d3a452244d853fde6dc8') | ||||
|  | ||||
|     def test_allsubtitles(self): | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         self.DL.params['allsubtitles'] = True | ||||
|         subtitles = self.getSubtitles() | ||||
|         self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr'])) | ||||
|  | ||||
|     def test_list_subtitles(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.params['listsubtitles'] = True | ||||
|         info_dict = self.getInfoDict() | ||||
|         self.assertEqual(info_dict, None) | ||||
|  | ||||
|     def test_automatic_captions(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.params['writeautomaticsub'] = True | ||||
|         self.DL.params['subtitleslang'] = ['en'] | ||||
|         subtitles = self.getSubtitles() | ||||
|         self.assertTrue(len(subtitles.keys()) == 0) | ||||
|  | ||||
|     def test_nosubtitles(self): | ||||
|         self.DL.expect_warning(u'video doesn\'t have subtitles') | ||||
|         self.url = 'http://vimeo.com/56015672' | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         self.DL.params['allsubtitles'] = True | ||||
|         subtitles = self.getSubtitles() | ||||
|         self.assertEqual(len(subtitles), 0) | ||||
|  | ||||
|     def test_multiple_langs(self): | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         langs = ['es', 'fr', 'de'] | ||||
|         self.DL.params['subtitleslangs'] = langs | ||||
|         subtitles = self.getSubtitles() | ||||
|         for lang in langs: | ||||
|             self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang) | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
|   | ||||
							
								
								
									
										47
									
								
								test/test_unicode_literals.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										47
									
								
								test/test_unicode_literals.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,47 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import io | ||||
| import os | ||||
| import re | ||||
| import unittest | ||||
|  | ||||
| rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) | ||||
|  | ||||
| IGNORED_FILES = [ | ||||
|     'setup.py',  # http://bugs.python.org/issue13943 | ||||
| ] | ||||
|  | ||||
|  | ||||
| class TestUnicodeLiterals(unittest.TestCase): | ||||
|     def test_all_files(self): | ||||
|         print('Skipping this test (not yet fully implemented)') | ||||
|         return | ||||
|  | ||||
|         for dirpath, _, filenames in os.walk(rootDir): | ||||
|             for basename in filenames: | ||||
|                 if not basename.endswith('.py'): | ||||
|                     continue | ||||
|                 if basename in IGNORED_FILES: | ||||
|                     continue | ||||
|  | ||||
|                 fn = os.path.join(dirpath, basename) | ||||
|                 with io.open(fn, encoding='utf-8') as inf: | ||||
|                     code = inf.read() | ||||
|  | ||||
|                 if "'" not in code and '"' not in code: | ||||
|                     continue | ||||
|                 imps = 'from __future__ import unicode_literals' | ||||
|                 self.assertTrue( | ||||
|                     imps in code, | ||||
|                     ' %s  missing in %s' % (imps, fn)) | ||||
|  | ||||
|                 m = re.search(r'(?<=\s)u[\'"](?!\)|,|$)', code) | ||||
|                 if m is not None: | ||||
|                     self.assertTrue( | ||||
|                         m is None, | ||||
|                         'u present in %s, around %s' % ( | ||||
|                             fn, code[m.start() - 10:m.end() + 10])) | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
| @@ -9,6 +9,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | ||||
|  | ||||
|  | ||||
| # Various small unit tests | ||||
| import io | ||||
| import xml.etree.ElementTree | ||||
|  | ||||
| #from youtube_dl.utils import htmlentity_transform | ||||
| @@ -16,17 +17,23 @@ from youtube_dl.utils import ( | ||||
|     DateRange, | ||||
|     encodeFilename, | ||||
|     find_xpath_attr, | ||||
|     fix_xml_ampersands, | ||||
|     get_meta_content, | ||||
|     orderedSet, | ||||
|     PagedList, | ||||
|     parse_duration, | ||||
|     read_batch_urls, | ||||
|     sanitize_filename, | ||||
|     shell_quote, | ||||
|     smuggle_url, | ||||
|     str_to_int, | ||||
|     struct_unpack, | ||||
|     timeconvert, | ||||
|     unescapeHTML, | ||||
|     unified_strdate, | ||||
|     unsmuggle_url, | ||||
|     url_basename, | ||||
|     urlencode_postdata, | ||||
|     xpath_with_ns, | ||||
| ) | ||||
|  | ||||
| @@ -124,6 +131,7 @@ class TestUtil(unittest.TestCase): | ||||
|         self.assertEqual(unified_strdate('8/7/2009'), '20090708') | ||||
|         self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214') | ||||
|         self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011') | ||||
|         self.assertEqual(unified_strdate('1968-12-10'), '19681210') | ||||
|  | ||||
|     def test_find_xpath_attr(self): | ||||
|         testxml = u'''<root> | ||||
| @@ -192,5 +200,71 @@ class TestUtil(unittest.TestCase): | ||||
|             url_basename(u'http://media.w3.org/2010/05/sintel/trailer.mp4'), | ||||
|             u'trailer.mp4') | ||||
|  | ||||
    def test_parse_duration(self):
        # None propagates; a bare number is a count of seconds.
        self.assertEqual(parse_duration(None), None)
        self.assertEqual(parse_duration('1'), 1)
        # MM:SS and HH:MM:SS forms (minutes may exceed 59).
        self.assertEqual(parse_duration('1337:12'), 80232)
        self.assertEqual(parse_duration('9:12:43'), 33163)
        self.assertEqual(parse_duration('12:00'), 720)
        self.assertEqual(parse_duration('00:01:01'), 61)
        # Unparsable input yields None rather than raising.
        self.assertEqual(parse_duration('x:y'), None)
        # XhYmZs-style suffix notation, including overflowing minutes.
        self.assertEqual(parse_duration('3h11m53s'), 11513)
        self.assertEqual(parse_duration('62m45s'), 3765)
        self.assertEqual(parse_duration('6m59s'), 419)
        self.assertEqual(parse_duration('49s'), 49)
        self.assertEqual(parse_duration('0h0m0s'), 0)
        self.assertEqual(parse_duration('0m0s'), 0)
        self.assertEqual(parse_duration('0s'), 0)
|  | ||||
|     def test_fix_xml_ampersands(self): | ||||
|         self.assertEqual( | ||||
|             fix_xml_ampersands('"&x=y&z=a'), '"&x=y&z=a') | ||||
|         self.assertEqual( | ||||
|             fix_xml_ampersands('"&x=y&wrong;&z=a'), | ||||
|             '"&x=y&wrong;&z=a') | ||||
|         self.assertEqual( | ||||
|             fix_xml_ampersands('&'><"'), | ||||
|             '&'><"') | ||||
|         self.assertEqual( | ||||
|             fix_xml_ampersands('Ӓ᪼'), 'Ӓ᪼') | ||||
|         self.assertEqual(fix_xml_ampersands('&#&#'), '&#&#') | ||||
|  | ||||
    def test_paged_list(self):
        # Build a PagedList over the integers 0..size-1 served in pages of
        # `pagesize`, then check that getslice(*sliceargs) matches `expected`.
        def testPL(size, pagesize, sliceargs, expected):
            def get_page(pagenum):
                firstid = pagenum * pagesize
                upto = min(size, pagenum * pagesize + pagesize)
                for i in range(firstid, upto):
                    yield i

            pl = PagedList(get_page, pagesize)
            got = pl.getslice(*sliceargs)
            self.assertEqual(got, expected)

        testPL(5, 2, (), [0, 1, 2, 3, 4])          # full contents
        testPL(5, 2, (1,), [1, 2, 3, 4])           # open-ended from start index
        testPL(5, 2, (2,), [2, 3, 4])
        testPL(5, 2, (4,), [4])
        testPL(5, 2, (0, 3), [0, 1, 2])            # bounded slices
        testPL(5, 2, (1, 4), [1, 2, 3])
        testPL(5, 2, (2, 99), [2, 3, 4])           # end past the data is clipped
        testPL(5, 2, (20, 99), [])                 # fully out of range is empty
|  | ||||
    def test_struct_unpack(self):
        # struct_unpack must accept a unicode format string (py2 workaround).
        self.assertEqual(struct_unpack(u'!B', b'\x00'), (0,))
|  | ||||
    def test_read_batch_urls(self):
        # The batch file may contain a UTF-8 BOM (here as raw \xef\xbb\xbf),
        # leading/trailing whitespace, \r\n line endings, and comment lines
        # starting with '#' or ';' — all of which must be stripped/skipped.
        f = io.StringIO(u'''\xef\xbb\xbf foo
            bar\r
            baz
            # More after this line\r
            ; or after this
            bam''')
        self.assertEqual(read_batch_urls(f), [u'foo', u'bar', u'baz', u'bam'])
|  | ||||
    def test_urlencode_postdata(self):
        # POST payloads must be bytes on both py2 and py3.
        data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
        self.assertTrue(isinstance(data, bytes))
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
|   | ||||
| @@ -16,6 +16,7 @@ from youtube_dl.extractor import ( | ||||
|     YoutubeChannelIE, | ||||
|     YoutubeShowIE, | ||||
|     YoutubeTopListIE, | ||||
|     YoutubeSearchURLIE, | ||||
| ) | ||||
|  | ||||
|  | ||||
| @@ -30,7 +31,7 @@ class TestYoutubeLists(unittest.TestCase): | ||||
|         result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re') | ||||
|         self.assertIsPlaylist(result) | ||||
|         self.assertEqual(result['title'], 'ytdl test PL') | ||||
|         ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']] | ||||
|         ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']] | ||||
|         self.assertEqual(ytie_results, [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE']) | ||||
|  | ||||
|     def test_youtube_playlist_noplaylist(self): | ||||
| @@ -39,7 +40,7 @@ class TestYoutubeLists(unittest.TestCase): | ||||
|         ie = YoutubePlaylistIE(dl) | ||||
|         result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re') | ||||
|         self.assertEqual(result['_type'], 'url') | ||||
|         self.assertEqual(YoutubeIE()._extract_id(result['url']), 'FXxLjLQi3Fg') | ||||
|         self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg') | ||||
|  | ||||
|     def test_issue_673(self): | ||||
|         dl = FakeYDL() | ||||
| @@ -59,7 +60,7 @@ class TestYoutubeLists(unittest.TestCase): | ||||
|         dl = FakeYDL() | ||||
|         ie = YoutubePlaylistIE(dl) | ||||
|         result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC') | ||||
|         ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']] | ||||
|         ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']] | ||||
|         self.assertFalse('pElCt5oNDuI' in ytie_results) | ||||
|         self.assertFalse('KdPEApIVdWM' in ytie_results) | ||||
|          | ||||
| @@ -76,9 +77,9 @@ class TestYoutubeLists(unittest.TestCase): | ||||
|         # TODO find a > 100 (paginating?) videos course | ||||
|         result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') | ||||
|         entries = result['entries'] | ||||
|         self.assertEqual(YoutubeIE()._extract_id(entries[0]['url']), 'j9WZyLZCBzs') | ||||
|         self.assertEqual(YoutubeIE().extract_id(entries[0]['url']), 'j9WZyLZCBzs') | ||||
|         self.assertEqual(len(entries), 25) | ||||
|         self.assertEqual(YoutubeIE()._extract_id(entries[-1]['url']), 'rYefUsYuEp0') | ||||
|         self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0') | ||||
|  | ||||
|     def test_youtube_channel(self): | ||||
|         dl = FakeYDL() | ||||
| @@ -117,12 +118,30 @@ class TestYoutubeLists(unittest.TestCase): | ||||
|         original_video = entries[0] | ||||
|         self.assertEqual(original_video['id'], 'rjFaenf1T-Y') | ||||
|  | ||||
    def test_youtube_toptracks(self):
        # Disabled: the remote playlist page currently returns HTTP 500,
        # so everything after this return is intentionally unreachable.
        print('Skipping: The playlist page gives error 500')
        return
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=MCUS')
        entries = result['entries']
        self.assertEqual(len(entries), 100)
|  | ||||
|     def test_youtube_toplist(self): | ||||
|         dl = FakeYDL() | ||||
|         ie = YoutubeTopListIE(dl) | ||||
|         result = ie.extract('yttoplist:music:Top Tracks') | ||||
|         result = ie.extract('yttoplist:music:Trending') | ||||
|         entries = result['entries'] | ||||
|         self.assertTrue(len(entries) >= 5) | ||||
|  | ||||
    def test_youtube_search_url(self):
        # A /results?search_query=... URL must be handled as a playlist whose
        # title is the (decoded) search query.
        dl = FakeYDL()
        ie = YoutubeSearchURLIE(dl)
        result = ie.extract('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video')
        entries = result['entries']
        self.assertIsPlaylist(result)
        self.assertEqual(result['title'], 'youtube-dl test video')
        self.assertTrue(len(entries) >= 5)
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
|   | ||||
| @@ -28,10 +28,10 @@ _TESTS = [ | ||||
|         u'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@', | ||||
|     ), | ||||
|     ( | ||||
|         u'https://s.ytimg.com/yts/swfbin/watch_as3-vflg5GhxU.swf', | ||||
|         u'swf', | ||||
|         82, | ||||
|         u':/.-,+*)=\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBAzyxw>utsrqponmlkjihgfedcba987654321' | ||||
|         u'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js', | ||||
|         u'js', | ||||
|         90, | ||||
|         u']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876', | ||||
|     ), | ||||
| ] | ||||
|  | ||||
|   | ||||
							
								
								
									
										24
									
								
								youtube-dl.plugin.zsh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										24
									
								
								youtube-dl.plugin.zsh
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,24 @@ | ||||
# This allows the youtube-dl command to be installed in ZSH using antigen.
# Antigen is a bundle manager. It allows you to enhance the functionality of
# your zsh session by installing bundles and themes easily.

# Antigen documentation:
# http://antigen.sharats.me/
# https://github.com/zsh-users/antigen

# Install youtube-dl:
# antigen bundle rg3/youtube-dl
# Bundles installed by antigen are available for use immediately.

# Update youtube-dl (and all other antigen bundles):
# antigen update

# The antigen command will download the git repository to a folder and then
# execute an enabling script (this file). The complete process for loading the
# code is documented here:
# https://github.com/zsh-users/antigen#notes-on-writing-plugins

# This specific script just aliases youtube-dl to the python script that this
# library provides. This requires updating the PYTHONPATH to ensure that the
# full set of code can be located.
# NOTE(review): $(dirname $0) is expanded, unquoted, when the alias is
# defined — a checkout path containing spaces would break it; confirm
# before relying on this in such environments.
alias youtube-dl="PYTHONPATH=$(dirname $0) $(dirname $0)/bin/youtube-dl"
| @@ -1,724 +1,12 @@ | ||||
| import os | ||||
| import re | ||||
| import subprocess | ||||
| import sys | ||||
| import time | ||||
|  | ||||
| from .utils import ( | ||||
|     compat_urllib_error, | ||||
|     compat_urllib_request, | ||||
|     ContentTooShortError, | ||||
|     determine_ext, | ||||
|     encodeFilename, | ||||
|     format_bytes, | ||||
|     sanitize_open, | ||||
|     timeconvert, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class FileDownloader(object): | ||||
|     """File Downloader class. | ||||
|  | ||||
|     File downloader objects are the ones responsible of downloading the | ||||
|     actual video file and writing it to disk. | ||||
|  | ||||
|     File downloaders accept a lot of parameters. In order not to saturate | ||||
|     the object constructor with arguments, it receives a dictionary of | ||||
|     options instead. | ||||
|  | ||||
|     Available options: | ||||
|  | ||||
|     verbose:           Print additional info to stdout. | ||||
|     quiet:             Do not print messages to stdout. | ||||
|     ratelimit:         Download speed limit, in bytes/sec. | ||||
|     retries:           Number of times to retry for HTTP error 5xx | ||||
|     buffersize:        Size of download buffer in bytes. | ||||
|     noresizebuffer:    Do not automatically resize the download buffer. | ||||
|     continuedl:        Try to continue downloads if possible. | ||||
|     noprogress:        Do not print the progress bar. | ||||
|     logtostderr:       Log messages to stderr instead of stdout. | ||||
|     consoletitle:      Display progress in console window's titlebar. | ||||
|     nopart:            Do not use temporary .part files. | ||||
|     updatetime:        Use the Last-modified header to set output file timestamps. | ||||
|     test:              Download only first bytes to test the downloader. | ||||
|     min_filesize:      Skip files smaller than this size | ||||
|     max_filesize:      Skip files larger than this size | ||||
|     """ | ||||
|  | ||||
|     params = None | ||||
|  | ||||
    def __init__(self, ydl, params):
        """Create a FileDownloader object with the given options.

        ydl:    owning YoutubeDL instance; all output/reporting is
                delegated to it.
        params: option dictionary (see the class docstring).
        """
        self.ydl = ydl
        self._progress_hooks = []  # callables invoked with progress dicts
        self.params = params
|  | ||||
|     @staticmethod | ||||
|     def format_seconds(seconds): | ||||
|         (mins, secs) = divmod(seconds, 60) | ||||
|         (hours, mins) = divmod(mins, 60) | ||||
|         if hours > 99: | ||||
|             return '--:--:--' | ||||
|         if hours == 0: | ||||
|             return '%02d:%02d' % (mins, secs) | ||||
|         else: | ||||
|             return '%02d:%02d:%02d' % (hours, mins, secs) | ||||
|  | ||||
|     @staticmethod | ||||
|     def calc_percent(byte_counter, data_len): | ||||
|         if data_len is None: | ||||
|             return None | ||||
|         return float(byte_counter) / float(data_len) * 100.0 | ||||
|  | ||||
|     @staticmethod | ||||
|     def format_percent(percent): | ||||
|         if percent is None: | ||||
|             return '---.-%' | ||||
|         return '%6s' % ('%3.1f%%' % percent) | ||||
|  | ||||
|     @staticmethod | ||||
|     def calc_eta(start, now, total, current): | ||||
|         if total is None: | ||||
|             return None | ||||
|         dif = now - start | ||||
|         if current == 0 or dif < 0.001: # One millisecond | ||||
|             return None | ||||
|         rate = float(current) / dif | ||||
|         return int((float(total) - float(current)) / rate) | ||||
|  | ||||
|     @staticmethod | ||||
|     def format_eta(eta): | ||||
|         if eta is None: | ||||
|             return '--:--' | ||||
|         return FileDownloader.format_seconds(eta) | ||||
|  | ||||
|     @staticmethod | ||||
|     def calc_speed(start, now, bytes): | ||||
|         dif = now - start | ||||
|         if bytes == 0 or dif < 0.001: # One millisecond | ||||
|             return None | ||||
|         return float(bytes) / dif | ||||
|  | ||||
|     @staticmethod | ||||
|     def format_speed(speed): | ||||
|         if speed is None: | ||||
|             return '%10s' % '---b/s' | ||||
|         return '%10s' % ('%s/s' % format_bytes(speed)) | ||||
|  | ||||
|     @staticmethod | ||||
|     def best_block_size(elapsed_time, bytes): | ||||
|         new_min = max(bytes / 2.0, 1.0) | ||||
|         new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB | ||||
|         if elapsed_time < 0.001: | ||||
|             return int(new_max) | ||||
|         rate = bytes / elapsed_time | ||||
|         if rate > new_max: | ||||
|             return int(new_max) | ||||
|         if rate < new_min: | ||||
|             return int(new_min) | ||||
|         return int(rate) | ||||
|  | ||||
|     @staticmethod | ||||
|     def parse_bytes(bytestr): | ||||
|         """Parse a string indicating a byte quantity into an integer.""" | ||||
|         matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr) | ||||
|         if matchobj is None: | ||||
|             return None | ||||
|         number = float(matchobj.group(1)) | ||||
|         multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower()) | ||||
|         return int(round(number * multiplier)) | ||||
|  | ||||
    def to_screen(self, *args, **kargs):
        # Delegate console output to the owning YoutubeDL.
        self.ydl.to_screen(*args, **kargs)
|  | ||||
    def to_stderr(self, message):
        # NOTE(review): despite the name this delegates to ydl.to_screen,
        # not a dedicated stderr writer — presumably the ydl instance picks
        # the actual stream; confirm against YoutubeDL before changing.
        self.ydl.to_screen(message)
|  | ||||
    def to_console_title(self, message):
        # Delegate console-title updates to the owning YoutubeDL.
        self.ydl.to_console_title(message)
|  | ||||
    def trouble(self, *args, **kargs):
        # Delegate error/trouble handling to the owning YoutubeDL.
        self.ydl.trouble(*args, **kargs)
|  | ||||
    def report_warning(self, *args, **kargs):
        # Delegate warning output to the owning YoutubeDL.
        self.ydl.report_warning(*args, **kargs)
|  | ||||
    def report_error(self, *args, **kargs):
        # Delegate error output to the owning YoutubeDL.
        self.ydl.report_error(*args, **kargs)
|  | ||||
|     def slow_down(self, start_time, byte_counter): | ||||
|         """Sleep if the download speed is over the rate limit.""" | ||||
|         rate_limit = self.params.get('ratelimit', None) | ||||
|         if rate_limit is None or byte_counter == 0: | ||||
|             return | ||||
|         now = time.time() | ||||
|         elapsed = now - start_time | ||||
|         if elapsed <= 0.0: | ||||
|             return | ||||
|         speed = float(byte_counter) / elapsed | ||||
|         if speed > rate_limit: | ||||
|             time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit) | ||||
|  | ||||
|     def temp_name(self, filename): | ||||
|         """Returns a temporary filename for the given filename.""" | ||||
|         if self.params.get('nopart', False) or filename == u'-' or \ | ||||
|                 (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))): | ||||
|             return filename | ||||
|         return filename + u'.part' | ||||
|  | ||||
|     def undo_temp_name(self, filename): | ||||
|         if filename.endswith(u'.part'): | ||||
|             return filename[:-len(u'.part')] | ||||
|         return filename | ||||
|  | ||||
|     def try_rename(self, old_filename, new_filename): | ||||
|         try: | ||||
|             if old_filename == new_filename: | ||||
|                 return | ||||
|             os.rename(encodeFilename(old_filename), encodeFilename(new_filename)) | ||||
|         except (IOError, OSError): | ||||
|             self.report_error(u'unable to rename file') | ||||
|  | ||||
|     def try_utime(self, filename, last_modified_hdr): | ||||
|         """Try to set the last-modified time of the given file.""" | ||||
|         if last_modified_hdr is None: | ||||
|             return | ||||
|         if not os.path.isfile(encodeFilename(filename)): | ||||
|             return | ||||
|         timestr = last_modified_hdr | ||||
|         if timestr is None: | ||||
|             return | ||||
|         filetime = timeconvert(timestr) | ||||
|         if filetime is None: | ||||
|             return filetime | ||||
|         # Ignore obviously invalid dates | ||||
|         if filetime == 0: | ||||
|             return | ||||
|         try: | ||||
|             os.utime(filename, (time.time(), filetime)) | ||||
|         except: | ||||
|             pass | ||||
|         return filetime | ||||
|  | ||||
    def report_destination(self, filename):
        """Report destination filename."""
        self.to_screen(u'[download] Destination: ' + filename)
|  | ||||
    def _report_progress_status(self, msg, is_last_line=False):
        # Render one progress update — either as a fresh line
        # ('progress_with_newline') or by overwriting the current line —
        # and mirror it to the console title.
        fullmsg = u'[download] ' + msg
        if self.params.get('progress_with_newline', False):
            self.to_screen(fullmsg)
        else:
            if os.name == 'nt':
                # cmd.exe has no ANSI erase: pad with spaces so a shorter
                # message fully overwrites the previous, longer one.
                prev_len = getattr(self, '_report_progress_prev_line_length',
                                   0)
                if prev_len > len(fullmsg):
                    fullmsg += u' ' * (prev_len - len(fullmsg))
                self._report_progress_prev_line_length = len(fullmsg)
                clear_line = u'\r'
            else:
                # \x1b[K erases to end of line; only used on a real terminal.
                clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
            self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
        self.to_console_title(u'youtube-dl ' + msg)
|  | ||||
    def report_progress(self, percent, data_len_str, speed, eta):
        """Report download progress (no-op when 'noprogress' is set)."""
        if self.params.get('noprogress', False):
            return
        # Unknown values get placeholder text instead of formatted numbers.
        if eta is not None:
            eta_str = self.format_eta(eta)
        else:
            eta_str = 'Unknown ETA'
        if percent is not None:
            percent_str = self.format_percent(percent)
        else:
            percent_str = 'Unknown %'
        speed_str = self.format_speed(speed)

        msg = (u'%s of %s at %s ETA %s' %
               (percent_str, data_len_str, speed_str, eta_str))
        self._report_progress_status(msg)
|  | ||||
    def report_progress_live_stream(self, downloaded_data_len, speed, elapsed):
        # Live streams have no total size or percentage, so only the amount
        # downloaded, the speed, and the elapsed time are shown.
        if self.params.get('noprogress', False):
            return
        downloaded_str = format_bytes(downloaded_data_len)
        speed_str = self.format_speed(speed)
        elapsed_str = FileDownloader.format_seconds(elapsed)
        msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
        self._report_progress_status(msg)
|  | ||||
    def report_finish(self, data_len_str, tot_time):
        """Report download finished."""
        if self.params.get('noprogress', False):
            self.to_screen(u'[download] Download completed')
        else:
            # Final 100% line; is_last_line keeps it on screen.
            self._report_progress_status(
                (u'100%% of %s in %s' %
                 (data_len_str, self.format_seconds(tot_time))),
                is_last_line=True)
|  | ||||
    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
|  | ||||
    def report_retry(self, count, retries):
        """Report retry in case of HTTP error 5xx"""
        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
|  | ||||
    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen(u'[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            # The filename may not be representable in the console encoding;
            # fall back to a message without it.
            self.to_screen(u'[download] The file has already been downloaded')
|  | ||||
    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_screen(u'[download] Unable to resume')
|  | ||||
|     def _download_with_rtmpdump(self, filename, url, player_url, page_url, play_path, tc_url, live, conn): | ||||
|         def run_rtmpdump(args): | ||||
|             start = time.time() | ||||
|             resume_percent = None | ||||
|             resume_downloaded_data_len = None | ||||
|             proc = subprocess.Popen(args, stderr=subprocess.PIPE) | ||||
|             cursor_in_new_line = True | ||||
|             proc_stderr_closed = False | ||||
|             while not proc_stderr_closed: | ||||
|                 # read line from stderr | ||||
|                 line = u'' | ||||
|                 while True: | ||||
|                     char = proc.stderr.read(1) | ||||
|                     if not char: | ||||
|                         proc_stderr_closed = True | ||||
|                         break | ||||
|                     if char in [b'\r', b'\n']: | ||||
|                         break | ||||
|                     line += char.decode('ascii', 'replace') | ||||
|                 if not line: | ||||
|                     # proc_stderr_closed is True | ||||
|                     continue | ||||
|                 mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line) | ||||
|                 if mobj: | ||||
|                     downloaded_data_len = int(float(mobj.group(1))*1024) | ||||
|                     percent = float(mobj.group(2)) | ||||
|                     if not resume_percent: | ||||
|                         resume_percent = percent | ||||
|                         resume_downloaded_data_len = downloaded_data_len | ||||
|                     eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent) | ||||
|                     speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len) | ||||
|                     data_len = None | ||||
|                     if percent > 0: | ||||
|                         data_len = int(downloaded_data_len * 100 / percent) | ||||
|                     data_len_str = u'~' + format_bytes(data_len) | ||||
|                     self.report_progress(percent, data_len_str, speed, eta) | ||||
|                     cursor_in_new_line = False | ||||
|                     self._hook_progress({ | ||||
|                         'downloaded_bytes': downloaded_data_len, | ||||
|                         'total_bytes': data_len, | ||||
|                         'tmpfilename': tmpfilename, | ||||
|                         'filename': filename, | ||||
|                         'status': 'downloading', | ||||
|                         'eta': eta, | ||||
|                         'speed': speed, | ||||
|                     }) | ||||
|                 else: | ||||
|                     # no percent for live streams | ||||
|                     mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line) | ||||
|                     if mobj: | ||||
|                         downloaded_data_len = int(float(mobj.group(1))*1024) | ||||
|                         time_now = time.time() | ||||
|                         speed = self.calc_speed(start, time_now, downloaded_data_len) | ||||
|                         self.report_progress_live_stream(downloaded_data_len, speed, time_now - start) | ||||
|                         cursor_in_new_line = False | ||||
|                         self._hook_progress({ | ||||
|                             'downloaded_bytes': downloaded_data_len, | ||||
|                             'tmpfilename': tmpfilename, | ||||
|                             'filename': filename, | ||||
|                             'status': 'downloading', | ||||
|                             'speed': speed, | ||||
|                         }) | ||||
|                     elif self.params.get('verbose', False): | ||||
|                         if not cursor_in_new_line: | ||||
|                             self.to_screen(u'') | ||||
|                         cursor_in_new_line = True | ||||
|                         self.to_screen(u'[rtmpdump] '+line) | ||||
|             proc.wait() | ||||
|             if not cursor_in_new_line: | ||||
|                 self.to_screen(u'') | ||||
|             return proc.returncode | ||||
|  | ||||
|         self.report_destination(filename) | ||||
|         tmpfilename = self.temp_name(filename) | ||||
|         test = self.params.get('test', False) | ||||
|  | ||||
|         # Check for rtmpdump first | ||||
|         try: | ||||
|             subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT) | ||||
|         except (OSError, IOError): | ||||
|             self.report_error(u'RTMP download detected but "rtmpdump" could not be run') | ||||
|             return False | ||||
|  | ||||
|         # Download using rtmpdump. rtmpdump returns exit code 2 when | ||||
|         # the connection was interrumpted and resuming appears to be | ||||
|         # possible. This is part of rtmpdump's normal usage, AFAIK. | ||||
|         basic_args = ['rtmpdump', '--verbose', '-r', url, '-o', tmpfilename] | ||||
|         if player_url is not None: | ||||
|             basic_args += ['--swfVfy', player_url] | ||||
|         if page_url is not None: | ||||
|             basic_args += ['--pageUrl', page_url] | ||||
|         if play_path is not None: | ||||
|             basic_args += ['--playpath', play_path] | ||||
|         if tc_url is not None: | ||||
|             basic_args += ['--tcUrl', url] | ||||
|         if test: | ||||
|             basic_args += ['--stop', '1'] | ||||
|         if live: | ||||
|             basic_args += ['--live'] | ||||
|         if conn: | ||||
|             basic_args += ['--conn', conn] | ||||
|         args = basic_args + [[], ['--resume', '--skip', '1']][self.params.get('continuedl', False)] | ||||
|  | ||||
|         if sys.platform == 'win32' and sys.version_info < (3, 0): | ||||
|             # Windows subprocess module does not actually support Unicode | ||||
|             # on Python 2.x | ||||
|             # See http://stackoverflow.com/a/9951851/35070 | ||||
|             subprocess_encoding = sys.getfilesystemencoding() | ||||
|             args = [a.encode(subprocess_encoding, 'ignore') for a in args] | ||||
|         else: | ||||
|             subprocess_encoding = None | ||||
|  | ||||
|         if self.params.get('verbose', False): | ||||
|             if subprocess_encoding: | ||||
|                 str_args = [ | ||||
|                     a.decode(subprocess_encoding) if isinstance(a, bytes) else a | ||||
|                     for a in args] | ||||
|             else: | ||||
|                 str_args = args | ||||
|             try: | ||||
|                 import pipes | ||||
|                 shell_quote = lambda args: ' '.join(map(pipes.quote, str_args)) | ||||
|             except ImportError: | ||||
|                 shell_quote = repr | ||||
|             self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(str_args)) | ||||
|  | ||||
|         retval = run_rtmpdump(args) | ||||
|  | ||||
|         while (retval == 2 or retval == 1) and not test: | ||||
|             prevsize = os.path.getsize(encodeFilename(tmpfilename)) | ||||
|             self.to_screen(u'[rtmpdump] %s bytes' % prevsize) | ||||
|             time.sleep(5.0) # This seems to be needed | ||||
|             retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1]) | ||||
|             cursize = os.path.getsize(encodeFilename(tmpfilename)) | ||||
|             if prevsize == cursize and retval == 1: | ||||
|                 break | ||||
|              # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those | ||||
|             if prevsize == cursize and retval == 2 and cursize > 1024: | ||||
|                 self.to_screen(u'[rtmpdump] Could not download the whole video. This can happen for some advertisements.') | ||||
|                 retval = 0 | ||||
|                 break | ||||
|         if retval == 0 or (test and retval == 2): | ||||
|             fsize = os.path.getsize(encodeFilename(tmpfilename)) | ||||
|             self.to_screen(u'[rtmpdump] %s bytes' % fsize) | ||||
|             self.try_rename(tmpfilename, filename) | ||||
|             self._hook_progress({ | ||||
|                 'downloaded_bytes': fsize, | ||||
|                 'total_bytes': fsize, | ||||
|                 'filename': filename, | ||||
|                 'status': 'finished', | ||||
|             }) | ||||
|             return True | ||||
|         else: | ||||
|             self.to_stderr(u"\n") | ||||
|             self.report_error(u'rtmpdump exited with code %d' % retval) | ||||
|             return False | ||||
|  | ||||
|     def _download_with_mplayer(self, filename, url): | ||||
|         self.report_destination(filename) | ||||
|         tmpfilename = self.temp_name(filename) | ||||
|  | ||||
|         args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url] | ||||
|         # Check for mplayer first | ||||
|         try: | ||||
|             subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT) | ||||
|         except (OSError, IOError): | ||||
|             self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0] ) | ||||
|             return False | ||||
|  | ||||
|         # Download using mplayer.  | ||||
|         retval = subprocess.call(args) | ||||
|         if retval == 0: | ||||
|             fsize = os.path.getsize(encodeFilename(tmpfilename)) | ||||
|             self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize)) | ||||
|             self.try_rename(tmpfilename, filename) | ||||
|             self._hook_progress({ | ||||
|                 'downloaded_bytes': fsize, | ||||
|                 'total_bytes': fsize, | ||||
|                 'filename': filename, | ||||
|                 'status': 'finished', | ||||
|             }) | ||||
|             return True | ||||
|         else: | ||||
|             self.to_stderr(u"\n") | ||||
|             self.report_error(u'mplayer exited with code %d' % retval) | ||||
|             return False | ||||
|  | ||||
|     def _download_m3u8_with_ffmpeg(self, filename, url): | ||||
|         self.report_destination(filename) | ||||
|         tmpfilename = self.temp_name(filename) | ||||
|  | ||||
|         args = ['-y', '-i', url, '-f', 'mp4', '-c', 'copy', | ||||
|             '-bsf:a', 'aac_adtstoasc', tmpfilename] | ||||
|  | ||||
|         for program in ['avconv', 'ffmpeg']: | ||||
|             try: | ||||
|                 subprocess.call([program, '-version'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT) | ||||
|                 break | ||||
|             except (OSError, IOError): | ||||
|                 pass | ||||
|         else: | ||||
|             self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found') | ||||
|         cmd = [program] + args | ||||
|  | ||||
|         retval = subprocess.call(cmd) | ||||
|         if retval == 0: | ||||
|             fsize = os.path.getsize(encodeFilename(tmpfilename)) | ||||
|             self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize)) | ||||
|             self.try_rename(tmpfilename, filename) | ||||
|             self._hook_progress({ | ||||
|                 'downloaded_bytes': fsize, | ||||
|                 'total_bytes': fsize, | ||||
|                 'filename': filename, | ||||
|                 'status': 'finished', | ||||
|             }) | ||||
|             return True | ||||
|         else: | ||||
|             self.to_stderr(u"\n") | ||||
|             self.report_error(u'ffmpeg exited with code %d' % retval) | ||||
|             return False | ||||
| # Legacy file for backwards compatibility, use youtube_dl.downloader instead! | ||||
| from .downloader import FileDownloader as RealFileDownloader | ||||
| from .downloader import get_suitable_downloader | ||||
|  | ||||
|  | ||||
| # This class reproduces the old behaviour of FileDownloader | ||||
| class FileDownloader(RealFileDownloader): | ||||
    def _do_download(self, filename, info_dict):
        """Download info_dict['url'] to `filename`.

        Dispatches to protocol-specific helpers (rtmpdump for rtmp*,
        mplayer for mms/rtsp, ffmpeg/avconv for m3u8) and otherwise
        performs a plain HTTP download with resume and retry support.
        Returns True on success, False on failure; progress hooks are
        invoked via self._hook_progress along the way.
        """
        url = info_dict['url']

        # Check file already present
        # With --continue and without --no-part, a file that already exists
        # under its final name is taken to be fully downloaded.
        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
            self.report_file_already_downloaded(filename)
            self._hook_progress({
                'filename': filename,
                'status': 'finished',
                'total_bytes': os.path.getsize(encodeFilename(filename)),
            })
            return True

        # Attempt to download using rtmpdump
        if url.startswith('rtmp'):
            return self._download_with_rtmpdump(filename, url,
                                                info_dict.get('player_url', None),
                                                info_dict.get('page_url', None),
                                                info_dict.get('play_path', None),
                                                info_dict.get('tc_url', None),
                                                info_dict.get('rtmp_live', False),
                                                info_dict.get('rtmp_conn', None))

        # Attempt to download using mplayer
        if url.startswith('mms') or url.startswith('rtsp'):
            return self._download_with_mplayer(filename, url)

        # m3u8 manifest are downloaded with ffmpeg
        if determine_ext(url) == u'm3u8':
            return self._download_m3u8_with_ffmpeg(filename, url)

        tmpfilename = self.temp_name(filename)
        stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        if 'user_agent' in info_dict:
            headers['Youtubedl-user-agent'] = info_dict['user_agent']
        # basic_request deliberately never gets a Range header; it is used
        # to re-probe the full resource when a resume attempt fails (416).
        basic_request = compat_urllib_request.Request(url, None, headers)
        request = compat_urllib_request.Request(url, None, headers)

        # In test mode only fetch the first ~10 KiB.
        if self.params.get('test', False):
            request.add_header('Range','bytes=0-10240')

        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', False):
                self.report_resuming_byte(resume_len)
                request.add_header('Range','bytes=%d-' % resume_len)
                open_mode = 'ab'
            else:
                # Not resuming: ignore the partial file and overwrite it.
                resume_len = 0

        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                if count == 0 and 'urlhandle' in info_dict:
                    data = info_dict['urlhandle']
                # NOTE(review): this unconditionally re-opens the URL,
                # clobbering the 'urlhandle' assignment just above — looks
                # like a long-standing bug; confirm before relying on the
                # urlhandle shortcut.
                data = compat_urllib_request.urlopen(request)
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = compat_urllib_request.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation to the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            self._hook_progress({
                                'filename': filename,
                                'status': 'finished',
                            })
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            open_mode = 'wb'
                            break
            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.report_error(u'giving up after %s retries' % retries)
            return False

        # The server's Content-length covers only the (possibly ranged)
        # remaining body, so add what is already on disk to get the total.
        data_len = data.info().get('Content-length', None)
        if data_len is not None:
            data_len = int(data_len) + resume_len
            min_data_len = self.params.get("min_filesize", None)
            max_data_len =  self.params.get("max_filesize", None)
            if min_data_len is not None and data_len < min_data_len:
                self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                return False
            if max_data_len is not None and data_len > max_data_len:
                self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                return False

        data_len_str = format_bytes(data_len)
        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()
        while True:
            # Download and write
            before = time.time()
            data_block = data.read(block_size)
            after = time.time()
            if len(data_block) == 0:
                # End of stream.
                break
            byte_counter += len(data_block)

            # Open file just in time
            # The destination is only opened once the first block arrives,
            # so a failed connection never leaves an empty part file behind.
            if stream is None:
                try:
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.report_error(u'unable to open for writing: %s' % str(err))
                    return False
            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.to_stderr(u"\n")
                self.report_error(u'unable to write data: %s' % str(err))
                return False
            # Adapt the read size to the observed throughput unless the
            # user disabled that with --no-resize-buffer.
            if not self.params.get('noresizebuffer', False):
                block_size = self.best_block_size(after - before, len(data_block))

            # Progress message
            speed = self.calc_speed(start, time.time(), byte_counter - resume_len)
            if data_len is None:
                # Unknown total size: neither ETA nor percentage can be shown.
                eta = percent = None
            else:
                percent = self.calc_percent(byte_counter, data_len)
                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
            self.report_progress(percent, data_len_str, speed, eta)

            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'status': 'downloading',
                'eta': eta,
                'speed': speed,
            })

            # Apply rate limit
            self.slow_down(start, byte_counter - resume_len)

        if stream is None:
            # Nothing was ever received, so the file was never opened.
            self.to_stderr(u"\n")
            self.report_error(u'Did not get any data blocks')
            return False
        stream.close()
        self.report_finish(data_len_str, (time.time() - start))
        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
        })

        return True
|  | ||||
|     def _hook_progress(self, status): | ||||
|         real_fd = get_suitable_downloader(info_dict)(self.ydl, self.params) | ||||
|         for ph in self._progress_hooks: | ||||
|             ph(status) | ||||
|  | ||||
|     def add_progress_hook(self, ph): | ||||
|         """ ph gets called on download progress, with a dictionary with the entries | ||||
|         * filename: The final filename | ||||
|         * status: One of "downloading" and "finished" | ||||
|  | ||||
|         It can also have some of the following entries: | ||||
|  | ||||
|         * downloaded_bytes: Bytes on disks | ||||
|         * total_bytes: Total bytes, None if unknown | ||||
|         * tmpfilename: The filename we're currently writing to | ||||
|         * eta: The estimated time in seconds, None if unknown | ||||
|         * speed: The download speed in bytes/second, None if unknown | ||||
|  | ||||
|         Hooks are guaranteed to be called at least once (with status "finished") | ||||
|         if the download is successful. | ||||
|         """ | ||||
|         self._progress_hooks.append(ph) | ||||
|             real_fd.add_progress_hook(ph) | ||||
|         return real_fd.download(filename, info_dict) | ||||
|   | ||||
| @@ -1,4 +0,0 @@ | ||||
| # Legacy file for backwards compatibility, use youtube_dl.extractor instead! | ||||
|  | ||||
| from .extractor.common import InfoExtractor, SearchInfoExtractor | ||||
| from .extractor import gen_extractors, get_info_extractor | ||||
| @@ -1,9 +1,10 @@ | ||||
| #!/usr/bin/env python | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| from __future__ import absolute_import | ||||
| from __future__ import absolute_import, unicode_literals | ||||
|  | ||||
| import collections | ||||
| import datetime | ||||
| import errno | ||||
| import io | ||||
| import json | ||||
| @@ -39,6 +40,7 @@ from .utils import ( | ||||
|     locked_file, | ||||
|     make_HTTPS_handler, | ||||
|     MaxDownloadsReached, | ||||
|     PagedList, | ||||
|     PostProcessingError, | ||||
|     platform_name, | ||||
|     preferredencoding, | ||||
| @@ -51,9 +53,11 @@ from .utils import ( | ||||
|     write_json_file, | ||||
|     write_string, | ||||
|     YoutubeDLHandler, | ||||
|     prepend_extension, | ||||
| ) | ||||
| from .extractor import get_info_extractor, gen_extractors | ||||
| from .FileDownloader import FileDownloader | ||||
| from .downloader import get_suitable_downloader | ||||
| from .postprocessor import FFmpegMergerPP | ||||
| from .version import __version__ | ||||
|  | ||||
|  | ||||
| @@ -144,15 +148,25 @@ class YoutubeDL(object): | ||||
|                        again. | ||||
|     cookiefile:        File name where cookies should be read from and dumped to. | ||||
|     nocheckcertificate:Do not verify SSL certificates | ||||
|     prefer_insecure:   Use HTTP instead of HTTPS to retrieve information. | ||||
|                        At the moment, this is only supported by YouTube. | ||||
|     proxy:             URL of the proxy server to use | ||||
|     socket_timeout:    Time to wait for unresponsive hosts, in seconds | ||||
|     bidi_workaround:   Work around buggy terminals without bidirectional text | ||||
|                        support, using fridibi | ||||
|     debug_printtraffic:Print out sent and received HTTP traffic | ||||
|     include_ads:       Download ads as well | ||||
|     default_search:    Prepend this string if an input url is not valid. | ||||
|                        'auto' for elaborate guessing | ||||
|  | ||||
|     The following parameters are not used by YoutubeDL itself, they are used by | ||||
|     the FileDownloader: | ||||
|     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test, | ||||
|     noresizebuffer, retries, continuedl, noprogress, consoletitle | ||||
|  | ||||
|     The following options are used by the post processors: | ||||
|     prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available, | ||||
|                        otherwise prefer avconv. | ||||
|     """ | ||||
|  | ||||
|     params = None | ||||
| @@ -164,6 +178,8 @@ class YoutubeDL(object): | ||||
|  | ||||
|     def __init__(self, params=None): | ||||
|         """Create a FileDownloader object with the given options.""" | ||||
|         if params is None: | ||||
|             params = {} | ||||
|         self._ies = [] | ||||
|         self._ies_instances = {} | ||||
|         self._pps = [] | ||||
| @@ -172,7 +188,7 @@ class YoutubeDL(object): | ||||
|         self._num_downloads = 0 | ||||
|         self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] | ||||
|         self._err_file = sys.stderr | ||||
|         self.params = {} if params is None else params | ||||
|         self.params = params | ||||
|  | ||||
|         if params.get('bidi_workaround', False): | ||||
|             try: | ||||
| @@ -197,7 +213,7 @@ class YoutubeDL(object): | ||||
|                 self._output_channel = os.fdopen(master, 'rb') | ||||
|             except OSError as ose: | ||||
|                 if ose.errno == 2: | ||||
|                     self.report_warning(u'Could not find fribidi executable, ignoring --bidi-workaround . Make sure that  fribidi  is an executable file in one of the directories in your $PATH.') | ||||
|                     self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that  fribidi  is an executable file in one of the directories in your $PATH.') | ||||
|                 else: | ||||
|                     raise | ||||
|  | ||||
| @@ -206,15 +222,13 @@ class YoutubeDL(object): | ||||
|                 and not params['restrictfilenames']): | ||||
|             # On Python 3, the Unicode filesystem API will throw errors (#1474) | ||||
|             self.report_warning( | ||||
|                 u'Assuming --restrict-filenames since file system encoding ' | ||||
|                 u'cannot encode all charactes. ' | ||||
|                 u'Set the LC_ALL environment variable to fix this.') | ||||
|                 'Assuming --restrict-filenames since file system encoding ' | ||||
|                 'cannot encode all charactes. ' | ||||
|                 'Set the LC_ALL environment variable to fix this.') | ||||
|             self.params['restrictfilenames'] = True | ||||
|  | ||||
|         self.fd = FileDownloader(self, self.params) | ||||
|  | ||||
|         if '%(stitle)s' in self.params.get('outtmpl', ''): | ||||
|             self.report_warning(u'%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.') | ||||
|             self.report_warning('%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.') | ||||
|  | ||||
|         self._setup_opener() | ||||
|  | ||||
| @@ -248,18 +262,22 @@ class YoutubeDL(object): | ||||
|         self._pps.append(pp) | ||||
|         pp.set_downloader(self) | ||||
|  | ||||
|     def add_progress_hook(self, ph): | ||||
|         """Add the progress hook (currently only for the file downloader)""" | ||||
|         self._progress_hooks.append(ph) | ||||
|  | ||||
|     def _bidi_workaround(self, message): | ||||
|         if not hasattr(self, '_output_channel'): | ||||
|             return message | ||||
|  | ||||
|         assert hasattr(self, '_output_process') | ||||
|         assert type(message) == type(u'') | ||||
|         line_count = message.count(u'\n') + 1 | ||||
|         self._output_process.stdin.write((message + u'\n').encode('utf-8')) | ||||
|         assert type(message) == type('') | ||||
|         line_count = message.count('\n') + 1 | ||||
|         self._output_process.stdin.write((message + '\n').encode('utf-8')) | ||||
|         self._output_process.stdin.flush() | ||||
|         res = u''.join(self._output_channel.readline().decode('utf-8') | ||||
|         res = ''.join(self._output_channel.readline().decode('utf-8') | ||||
|                        for _ in range(line_count)) | ||||
|         return res[:-len(u'\n')] | ||||
|         return res[:-len('\n')] | ||||
|  | ||||
|     def to_screen(self, message, skip_eol=False): | ||||
|         """Print message to stdout if not in quiet mode.""" | ||||
| @@ -271,19 +289,19 @@ class YoutubeDL(object): | ||||
|             self.params['logger'].debug(message) | ||||
|         elif not check_quiet or not self.params.get('quiet', False): | ||||
|             message = self._bidi_workaround(message) | ||||
|             terminator = [u'\n', u''][skip_eol] | ||||
|             terminator = ['\n', ''][skip_eol] | ||||
|             output = message + terminator | ||||
|  | ||||
|             write_string(output, self._screen_file) | ||||
|  | ||||
|     def to_stderr(self, message): | ||||
|         """Print message to stderr.""" | ||||
|         assert type(message) == type(u'') | ||||
|         assert type(message) == type('') | ||||
|         if self.params.get('logger'): | ||||
|             self.params['logger'].error(message) | ||||
|         else: | ||||
|             message = self._bidi_workaround(message) | ||||
|             output = message + u'\n' | ||||
|             output = message + '\n' | ||||
|             write_string(output, self._err_file) | ||||
|  | ||||
|     def to_console_title(self, message): | ||||
| @@ -294,21 +312,21 @@ class YoutubeDL(object): | ||||
|             # already of type unicode() | ||||
|             ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) | ||||
|         elif 'TERM' in os.environ: | ||||
|             write_string(u'\033]0;%s\007' % message, self._screen_file) | ||||
|             write_string('\033]0;%s\007' % message, self._screen_file) | ||||
|  | ||||
|     def save_console_title(self): | ||||
|         if not self.params.get('consoletitle', False): | ||||
|             return | ||||
|         if 'TERM' in os.environ: | ||||
|             # Save the title on stack | ||||
|             write_string(u'\033[22;0t', self._screen_file) | ||||
|             write_string('\033[22;0t', self._screen_file) | ||||
|  | ||||
|     def restore_console_title(self): | ||||
|         if not self.params.get('consoletitle', False): | ||||
|             return | ||||
|         if 'TERM' in os.environ: | ||||
|             # Restore the title from stack | ||||
|             write_string(u'\033[23;0t', self._screen_file) | ||||
|             write_string('\033[23;0t', self._screen_file) | ||||
|  | ||||
|     def __enter__(self): | ||||
|         self.save_console_title() | ||||
| @@ -316,7 +334,7 @@ class YoutubeDL(object): | ||||
|  | ||||
|     def __exit__(self, *args): | ||||
|         self.restore_console_title() | ||||
|      | ||||
|  | ||||
|         if self.params.get('cookiefile') is not None: | ||||
|             self.cookiejar.save() | ||||
|  | ||||
| @@ -334,13 +352,13 @@ class YoutubeDL(object): | ||||
|         if self.params.get('verbose'): | ||||
|             if tb is None: | ||||
|                 if sys.exc_info()[0]:  # if .trouble has been called from an except block | ||||
|                     tb = u'' | ||||
|                     tb = '' | ||||
|                     if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: | ||||
|                         tb += u''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) | ||||
|                         tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) | ||||
|                     tb += compat_str(traceback.format_exc()) | ||||
|                 else: | ||||
|                     tb_data = traceback.format_list(traceback.extract_stack()) | ||||
|                     tb = u''.join(tb_data) | ||||
|                     tb = ''.join(tb_data) | ||||
|             self.to_stderr(tb) | ||||
|         if not self.params.get('ignoreerrors', False): | ||||
|             if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: | ||||
| @@ -355,12 +373,15 @@ class YoutubeDL(object): | ||||
|         Print the message to stderr, it will be prefixed with 'WARNING:' | ||||
|         If stderr is a tty file the 'WARNING:' will be colored | ||||
|         ''' | ||||
|         if self._err_file.isatty() and os.name != 'nt': | ||||
|             _msg_header = u'\033[0;33mWARNING:\033[0m' | ||||
|         if self.params.get('logger') is not None: | ||||
|             self.params['logger'].warning(message) | ||||
|         else: | ||||
|             _msg_header = u'WARNING:' | ||||
|         warning_message = u'%s %s' % (_msg_header, message) | ||||
|         self.to_stderr(warning_message) | ||||
|             if self._err_file.isatty() and os.name != 'nt': | ||||
|                 _msg_header = '\033[0;33mWARNING:\033[0m' | ||||
|             else: | ||||
|                 _msg_header = 'WARNING:' | ||||
|             warning_message = '%s %s' % (_msg_header, message) | ||||
|             self.to_stderr(warning_message) | ||||
|  | ||||
|     def report_error(self, message, tb=None): | ||||
|         ''' | ||||
| @@ -368,22 +389,18 @@ class YoutubeDL(object): | ||||
|         in red if stderr is a tty file. | ||||
|         ''' | ||||
|         if self._err_file.isatty() and os.name != 'nt': | ||||
|             _msg_header = u'\033[0;31mERROR:\033[0m' | ||||
|             _msg_header = '\033[0;31mERROR:\033[0m' | ||||
|         else: | ||||
|             _msg_header = u'ERROR:' | ||||
|         error_message = u'%s %s' % (_msg_header, message) | ||||
|             _msg_header = 'ERROR:' | ||||
|         error_message = '%s %s' % (_msg_header, message) | ||||
|         self.trouble(error_message, tb) | ||||
|  | ||||
|     def report_file_already_downloaded(self, file_name): | ||||
|         """Report file has already been fully downloaded.""" | ||||
|         try: | ||||
|             self.to_screen(u'[download] %s has already been downloaded' % file_name) | ||||
|             self.to_screen('[download] %s has already been downloaded' % file_name) | ||||
|         except UnicodeEncodeError: | ||||
|             self.to_screen(u'[download] The file has already been downloaded') | ||||
|  | ||||
|     def increment_downloads(self): | ||||
|         """Increment the ordinal that assigns a number to each file.""" | ||||
|         self._num_downloads += 1 | ||||
|             self.to_screen('[download] The file has already been downloaded') | ||||
|  | ||||
|     def prepare_filename(self, info_dict): | ||||
|         """Generate the output filename.""" | ||||
| @@ -394,61 +411,68 @@ class YoutubeDL(object): | ||||
|             autonumber_size = self.params.get('autonumber_size') | ||||
|             if autonumber_size is None: | ||||
|                 autonumber_size = 5 | ||||
|             autonumber_templ = u'%0' + str(autonumber_size) + u'd' | ||||
|             autonumber_templ = '%0' + str(autonumber_size) + 'd' | ||||
|             template_dict['autonumber'] = autonumber_templ % self._num_downloads | ||||
|             if template_dict.get('playlist_index') is not None: | ||||
|                 template_dict['playlist_index'] = u'%05d' % template_dict['playlist_index'] | ||||
|                 template_dict['playlist_index'] = '%05d' % template_dict['playlist_index'] | ||||
|             if template_dict.get('resolution') is None: | ||||
|                 if template_dict.get('width') and template_dict.get('height'): | ||||
|                     template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) | ||||
|                 elif template_dict.get('height'): | ||||
|                     template_dict['resolution'] = '%sp' % template_dict['height'] | ||||
|                 elif template_dict.get('width'): | ||||
|                     template_dict['resolution'] = '?x%d' % template_dict['width'] | ||||
|  | ||||
|             sanitize = lambda k, v: sanitize_filename( | ||||
|                 compat_str(v), | ||||
|                 restricted=self.params.get('restrictfilenames'), | ||||
|                 is_id=(k == u'id')) | ||||
|                 is_id=(k == 'id')) | ||||
|             template_dict = dict((k, sanitize(k, v)) | ||||
|                                  for k, v in template_dict.items() | ||||
|                                  if v is not None) | ||||
|             template_dict = collections.defaultdict(lambda: u'NA', template_dict) | ||||
|             template_dict = collections.defaultdict(lambda: 'NA', template_dict) | ||||
|  | ||||
|             tmpl = os.path.expanduser(self.params['outtmpl']) | ||||
|             filename = tmpl % template_dict | ||||
|             return filename | ||||
|         except ValueError as err: | ||||
|             self.report_error(u'Error in output template: ' + str(err) + u' (encoding: ' + repr(preferredencoding()) + ')') | ||||
|             self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') | ||||
|             return None | ||||
|  | ||||
|     def _match_entry(self, info_dict): | ||||
|         """ Returns None iff the file should be downloaded """ | ||||
|  | ||||
|         video_title = info_dict.get('title', info_dict.get('id', u'video')) | ||||
|         video_title = info_dict.get('title', info_dict.get('id', 'video')) | ||||
|         if 'title' in info_dict: | ||||
|             # This can happen when we're just evaluating the playlist | ||||
|             title = info_dict['title'] | ||||
|             matchtitle = self.params.get('matchtitle', False) | ||||
|             if matchtitle: | ||||
|                 if not re.search(matchtitle, title, re.IGNORECASE): | ||||
|                     return u'"' + title + '" title did not match pattern "' + matchtitle + '"' | ||||
|                     return '"' + title + '" title did not match pattern "' + matchtitle + '"' | ||||
|             rejecttitle = self.params.get('rejecttitle', False) | ||||
|             if rejecttitle: | ||||
|                 if re.search(rejecttitle, title, re.IGNORECASE): | ||||
|                     return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"' | ||||
|                     return '"' + title + '" title matched reject pattern "' + rejecttitle + '"' | ||||
|         date = info_dict.get('upload_date', None) | ||||
|         if date is not None: | ||||
|             dateRange = self.params.get('daterange', DateRange()) | ||||
|             if date not in dateRange: | ||||
|                 return u'%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) | ||||
|                 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) | ||||
|         view_count = info_dict.get('view_count', None) | ||||
|         if view_count is not None: | ||||
|             min_views = self.params.get('min_views') | ||||
|             if min_views is not None and view_count < min_views: | ||||
|                 return u'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) | ||||
|                 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) | ||||
|             max_views = self.params.get('max_views') | ||||
|             if max_views is not None and view_count > max_views: | ||||
|                 return u'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) | ||||
|                 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) | ||||
|         age_limit = self.params.get('age_limit') | ||||
|         if age_limit is not None: | ||||
|             if age_limit < info_dict.get('age_limit', 0): | ||||
|                 return u'Skipping "' + title + '" because it is age restricted' | ||||
|                 return 'Skipping "' + title + '" because it is age restricted' | ||||
|         if self.in_download_archive(info_dict): | ||||
|             return u'%s has already been recorded in archive' % video_title | ||||
|             return '%s has already been recorded in archive' % video_title | ||||
|         return None | ||||
|  | ||||
|     @staticmethod | ||||
| @@ -475,8 +499,8 @@ class YoutubeDL(object): | ||||
|                 continue | ||||
|  | ||||
|             if not ie.working(): | ||||
|                 self.report_warning(u'The program functionality for this site has been marked as broken, ' | ||||
|                                     u'and will probably not work.') | ||||
|                 self.report_warning('The program functionality for this site has been marked as broken, ' | ||||
|                                     'and will probably not work.') | ||||
|  | ||||
|             try: | ||||
|                 ie_result = ie.extract(url) | ||||
| @@ -502,6 +526,8 @@ class YoutubeDL(object): | ||||
|             except ExtractorError as de: # An error we somewhat expected | ||||
|                 self.report_error(compat_str(de), de.format_traceback()) | ||||
|                 break | ||||
|             except MaxDownloadsReached: | ||||
|                 raise | ||||
|             except Exception as e: | ||||
|                 if self.params.get('ignoreerrors', False): | ||||
|                     self.report_error(compat_str(e), tb=compat_str(traceback.format_exc())) | ||||
| @@ -509,7 +535,7 @@ class YoutubeDL(object): | ||||
|                 else: | ||||
|                     raise | ||||
|         else: | ||||
|             self.report_error(u'no suitable InfoExtractor: %s' % url) | ||||
|             self.report_error('no suitable InfoExtractor for URL %s' % url) | ||||
|  | ||||
|     def process_ie_result(self, ie_result, download=True, extra_info={}): | ||||
|         """ | ||||
| @@ -540,7 +566,7 @@ class YoutubeDL(object): | ||||
|             def make_result(embedded_info): | ||||
|                 new_result = ie_result.copy() | ||||
|                 for f in ('_type', 'url', 'ext', 'player_url', 'formats', | ||||
|                           'entries', 'urlhandle', 'ie_key', 'duration', | ||||
|                           'entries', 'ie_key', 'duration', | ||||
|                           'subtitles', 'annotations', 'format', | ||||
|                           'thumbnail', 'thumbnails'): | ||||
|                     if f in new_result: | ||||
| @@ -560,26 +586,34 @@ class YoutubeDL(object): | ||||
|         elif result_type == 'playlist': | ||||
|             # We process each entry in the playlist | ||||
|             playlist = ie_result.get('title', None) or ie_result.get('id', None) | ||||
|             self.to_screen(u'[download] Downloading playlist: %s' % playlist) | ||||
|             self.to_screen('[download] Downloading playlist: %s' % playlist) | ||||
|  | ||||
|             playlist_results = [] | ||||
|  | ||||
|             n_all_entries = len(ie_result['entries']) | ||||
|             playliststart = self.params.get('playliststart', 1) - 1 | ||||
|             playlistend = self.params.get('playlistend', None) | ||||
|             # For backwards compatibility, interpret -1 as whole list | ||||
|             if playlistend == -1: | ||||
|                 playlistend = None | ||||
|  | ||||
|             entries = ie_result['entries'][playliststart:playlistend] | ||||
|             n_entries = len(entries) | ||||
|  | ||||
|             self.to_screen( | ||||
|                 u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" % | ||||
|                 (ie_result['extractor'], playlist, n_all_entries, n_entries)) | ||||
|             if isinstance(ie_result['entries'], list): | ||||
|                 n_all_entries = len(ie_result['entries']) | ||||
|                 entries = ie_result['entries'][playliststart:playlistend] | ||||
|                 n_entries = len(entries) | ||||
|                 self.to_screen( | ||||
|                     "[%s] playlist %s: Collected %d video ids (downloading %d of them)" % | ||||
|                     (ie_result['extractor'], playlist, n_all_entries, n_entries)) | ||||
|             else: | ||||
|                 assert isinstance(ie_result['entries'], PagedList) | ||||
|                 entries = ie_result['entries'].getslice( | ||||
|                     playliststart, playlistend) | ||||
|                 n_entries = len(entries) | ||||
|                 self.to_screen( | ||||
|                     "[%s] playlist %s: Downloading %d videos" % | ||||
|                     (ie_result['extractor'], playlist, n_entries)) | ||||
|  | ||||
|             for i, entry in enumerate(entries, 1): | ||||
|                 self.to_screen(u'[download] Downloading video #%s of %s' % (i, n_entries)) | ||||
|                 self.to_screen('[download] Downloading video #%s of %s' % (i, n_entries)) | ||||
|                 extra = { | ||||
|                     'playlist': playlist, | ||||
|                     'playlist_index': i + playliststart, | ||||
| @@ -591,7 +625,7 @@ class YoutubeDL(object): | ||||
|  | ||||
|                 reason = self._match_entry(entry) | ||||
|                 if reason is not None: | ||||
|                     self.to_screen(u'[download] ' + reason) | ||||
|                     self.to_screen('[download] ' + reason) | ||||
|                     continue | ||||
|  | ||||
|                 entry_result = self.process_ie_result(entry, | ||||
| @@ -623,8 +657,32 @@ class YoutubeDL(object): | ||||
|             return available_formats[-1] | ||||
|         elif format_spec == 'worst': | ||||
|             return available_formats[0] | ||||
|         elif format_spec == 'bestaudio': | ||||
|             audio_formats = [ | ||||
|                 f for f in available_formats | ||||
|                 if f.get('vcodec') == 'none'] | ||||
|             if audio_formats: | ||||
|                 return audio_formats[-1] | ||||
|         elif format_spec == 'worstaudio': | ||||
|             audio_formats = [ | ||||
|                 f for f in available_formats | ||||
|                 if f.get('vcodec') == 'none'] | ||||
|             if audio_formats: | ||||
|                 return audio_formats[0] | ||||
|         elif format_spec == 'bestvideo': | ||||
|             video_formats = [ | ||||
|                 f for f in available_formats | ||||
|                 if f.get('acodec') == 'none'] | ||||
|             if video_formats: | ||||
|                 return video_formats[-1] | ||||
|         elif format_spec == 'worstvideo': | ||||
|             video_formats = [ | ||||
|                 f for f in available_formats | ||||
|                 if f.get('acodec') == 'none'] | ||||
|             if video_formats: | ||||
|                 return video_formats[0] | ||||
|         else: | ||||
|             extensions = [u'mp4', u'flv', u'webm', u'3gp'] | ||||
|             extensions = ['mp4', 'flv', 'webm', '3gp'] | ||||
|             if format_spec in extensions: | ||||
|                 filter_f = lambda f: f['ext'] == format_spec | ||||
|             else: | ||||
| @@ -642,8 +700,16 @@ class YoutubeDL(object): | ||||
|             info_dict['playlist'] = None | ||||
|             info_dict['playlist_index'] = None | ||||
|  | ||||
|         if 'display_id' not in info_dict and 'id' in info_dict: | ||||
|             info_dict['display_id'] = info_dict['id'] | ||||
|  | ||||
|         if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: | ||||
|             upload_date = datetime.datetime.utcfromtimestamp( | ||||
|                 info_dict['timestamp']) | ||||
|             info_dict['upload_date'] = upload_date.strftime('%Y%m%d') | ||||
|  | ||||
|         # This extractors handle format selection themselves | ||||
|         if info_dict['extractor'] in [u'Youku']: | ||||
|         if info_dict['extractor'] in ['Youku']: | ||||
|             if download: | ||||
|                 self.process_info(info_dict) | ||||
|             return info_dict | ||||
| @@ -655,15 +721,18 @@ class YoutubeDL(object): | ||||
|         else: | ||||
|             formats = info_dict['formats'] | ||||
|  | ||||
|         if not formats: | ||||
|             raise ExtractorError('No video formats found!') | ||||
|  | ||||
|         # We check that all the formats have the format and format_id fields | ||||
|         for (i, format) in enumerate(formats): | ||||
|         for i, format in enumerate(formats): | ||||
|             if format.get('format_id') is None: | ||||
|                 format['format_id'] = compat_str(i) | ||||
|             if format.get('format') is None: | ||||
|                 format['format'] = u'{id} - {res}{note}'.format( | ||||
|                 format['format'] = '{id} - {res}{note}'.format( | ||||
|                     id=format['format_id'], | ||||
|                     res=self.format_resolution(format), | ||||
|                     note=u' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', | ||||
|                     note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', | ||||
|                 ) | ||||
|             # Automatically determine file extension if missing | ||||
|             if 'ext' not in format: | ||||
| @@ -674,24 +743,20 @@ class YoutubeDL(object): | ||||
|             formats = list(takewhile_inclusive( | ||||
|                 lambda f: f['format_id'] != format_limit, formats | ||||
|             )) | ||||
|         if self.params.get('prefer_free_formats'): | ||||
|             def _free_formats_key(f): | ||||
|                 try: | ||||
|                     ext_ord = [u'flv', u'mp4', u'webm'].index(f['ext']) | ||||
|                 except ValueError: | ||||
|                     ext_ord = -1 | ||||
|                 # We only compare the extension if they have the same height and width | ||||
|                 return (f.get('height') if f.get('height') is not None else -1, | ||||
|                         f.get('width') if f.get('width') is not None else -1, | ||||
|                         ext_ord) | ||||
|             formats = sorted(formats, key=_free_formats_key) | ||||
|  | ||||
|         info_dict['formats'] = formats | ||||
|         # TODO Central sorting goes here | ||||
|  | ||||
|         if formats[0] is not info_dict: | ||||
|             # only set the 'formats' fields if the original info_dict list them | ||||
|             # otherwise we end up with a circular reference, the first (and unique) | ||||
|             # element in the 'formats' field in info_dict is info_dict itself, | ||||
|             # wich can't be exported to json | ||||
|             info_dict['formats'] = formats | ||||
|         if self.params.get('listformats', None): | ||||
|             self.list_formats(info_dict) | ||||
|             return | ||||
|  | ||||
|         req_format = self.params.get('format', 'best') | ||||
|         req_format = self.params.get('format') | ||||
|         if req_format is None: | ||||
|             req_format = 'best' | ||||
|         formats_to_download = [] | ||||
| @@ -699,21 +764,35 @@ class YoutubeDL(object): | ||||
|         if req_format in ('-1', 'all'): | ||||
|             formats_to_download = formats | ||||
|         else: | ||||
|             # We can accept formats requestd in the format: 34/5/best, we pick | ||||
|             # We can accept formats requested in the format: 34/5/best, we pick | ||||
|             # the first that is available, starting from left | ||||
|             req_formats = req_format.split('/') | ||||
|             for rf in req_formats: | ||||
|                 selected_format = self.select_format(rf, formats) | ||||
|                 if re.match(r'.+?\+.+?', rf) is not None: | ||||
|                     # Two formats have been requested like '137+139' | ||||
|                     format_1, format_2 = rf.split('+') | ||||
|                     formats_info = (self.select_format(format_1, formats), | ||||
|                         self.select_format(format_2, formats)) | ||||
|                     if all(formats_info): | ||||
|                         selected_format = { | ||||
|                             'requested_formats': formats_info, | ||||
|                             'format': rf, | ||||
|                             'ext': formats_info[0]['ext'], | ||||
|                         } | ||||
|                     else: | ||||
|                         selected_format = None | ||||
|                 else: | ||||
|                     selected_format = self.select_format(rf, formats) | ||||
|                 if selected_format is not None: | ||||
|                     formats_to_download = [selected_format] | ||||
|                     break | ||||
|         if not formats_to_download: | ||||
|             raise ExtractorError(u'requested format not available', | ||||
|             raise ExtractorError('requested format not available', | ||||
|                                  expected=True) | ||||
|  | ||||
|         if download: | ||||
|             if len(formats_to_download) > 1: | ||||
|                 self.to_screen(u'[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) | ||||
|                 self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) | ||||
|             for format in formats_to_download: | ||||
|                 new_info = dict(info_dict) | ||||
|                 new_info.update(format) | ||||
| @@ -726,12 +805,15 @@ class YoutubeDL(object): | ||||
|         """Process a single resolved IE result.""" | ||||
|  | ||||
|         assert info_dict.get('_type', 'video') == 'video' | ||||
|         #We increment the download the download count here to match the previous behaviour. | ||||
|         self.increment_downloads() | ||||
|  | ||||
|         max_downloads = self.params.get('max_downloads') | ||||
|         if max_downloads is not None: | ||||
|             if self._num_downloads >= int(max_downloads): | ||||
|                 raise MaxDownloadsReached() | ||||
|  | ||||
|         info_dict['fulltitle'] = info_dict['title'] | ||||
|         if len(info_dict['title']) > 200: | ||||
|             info_dict['title'] = info_dict['title'][:197] + u'...' | ||||
|             info_dict['title'] = info_dict['title'][:197] + '...' | ||||
|  | ||||
|         # Keep for backwards compatibility | ||||
|         info_dict['stitle'] = info_dict['title'] | ||||
| @@ -741,13 +823,10 @@ class YoutubeDL(object): | ||||
|  | ||||
|         reason = self._match_entry(info_dict) | ||||
|         if reason is not None: | ||||
|             self.to_screen(u'[download] ' + reason) | ||||
|             self.to_screen('[download] ' + reason) | ||||
|             return | ||||
|  | ||||
|         max_downloads = self.params.get('max_downloads') | ||||
|         if max_downloads is not None: | ||||
|             if self._num_downloads > int(max_downloads): | ||||
|                 raise MaxDownloadsReached() | ||||
|         self._num_downloads += 1 | ||||
|  | ||||
|         filename = self.prepare_filename(info_dict) | ||||
|  | ||||
| @@ -758,7 +837,7 @@ class YoutubeDL(object): | ||||
|             self.to_stdout(info_dict['id']) | ||||
|         if self.params.get('forceurl', False): | ||||
|             # For RTMP URLs, also include the playpath | ||||
|             self.to_stdout(info_dict['url'] + info_dict.get('play_path', u'')) | ||||
|             self.to_stdout(info_dict['url'] + info_dict.get('play_path', '')) | ||||
|         if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None: | ||||
|             self.to_stdout(info_dict['thumbnail']) | ||||
|         if self.params.get('forcedescription', False) and info_dict.get('description') is not None: | ||||
| @@ -785,37 +864,37 @@ class YoutubeDL(object): | ||||
|             if dn != '' and not os.path.exists(dn): | ||||
|                 os.makedirs(dn) | ||||
|         except (OSError, IOError) as err: | ||||
|             self.report_error(u'unable to create directory ' + compat_str(err)) | ||||
|             self.report_error('unable to create directory ' + compat_str(err)) | ||||
|             return | ||||
|  | ||||
|         if self.params.get('writedescription', False): | ||||
|             descfn = filename + u'.description' | ||||
|             descfn = filename + '.description' | ||||
|             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)): | ||||
|                 self.to_screen(u'[info] Video description is already present') | ||||
|                 self.to_screen('[info] Video description is already present') | ||||
|             else: | ||||
|                 try: | ||||
|                     self.to_screen(u'[info] Writing video description to: ' + descfn) | ||||
|                     self.to_screen('[info] Writing video description to: ' + descfn) | ||||
|                     with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: | ||||
|                         descfile.write(info_dict['description']) | ||||
|                 except (KeyError, TypeError): | ||||
|                     self.report_warning(u'There\'s no description to write.') | ||||
|                     self.report_warning('There\'s no description to write.') | ||||
|                 except (OSError, IOError): | ||||
|                     self.report_error(u'Cannot write description file ' + descfn) | ||||
|                     self.report_error('Cannot write description file ' + descfn) | ||||
|                     return | ||||
|  | ||||
|         if self.params.get('writeannotations', False): | ||||
|             annofn = filename + u'.annotations.xml' | ||||
|             annofn = filename + '.annotations.xml' | ||||
|             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)): | ||||
|                 self.to_screen(u'[info] Video annotations are already present') | ||||
|                 self.to_screen('[info] Video annotations are already present') | ||||
|             else: | ||||
|                 try: | ||||
|                     self.to_screen(u'[info] Writing video annotations to: ' + annofn) | ||||
|                     self.to_screen('[info] Writing video annotations to: ' + annofn) | ||||
|                     with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: | ||||
|                         annofile.write(info_dict['annotations']) | ||||
|                 except (KeyError, TypeError): | ||||
|                     self.report_warning(u'There are no annotations to write.') | ||||
|                     self.report_warning('There are no annotations to write.') | ||||
|                 except (OSError, IOError): | ||||
|                     self.report_error(u'Cannot write annotations file: ' + annofn) | ||||
|                     self.report_error('Cannot write annotations file: ' + annofn) | ||||
|                     return | ||||
|  | ||||
|         subtitles_are_requested = any([self.params.get('writesubtitles', False), | ||||
| @@ -833,46 +912,45 @@ class YoutubeDL(object): | ||||
|                 try: | ||||
|                     sub_filename = subtitles_filename(filename, sub_lang, sub_format) | ||||
|                     if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)): | ||||
|                         self.to_screen(u'[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format)) | ||||
|                         self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format)) | ||||
|                     else: | ||||
|                         self.to_screen(u'[info] Writing video subtitles to: ' + sub_filename) | ||||
|                         self.to_screen('[info] Writing video subtitles to: ' + sub_filename) | ||||
|                         with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: | ||||
|                                 subfile.write(sub) | ||||
|                 except (OSError, IOError): | ||||
|                     self.report_error(u'Cannot write subtitles file ' + descfn) | ||||
|                     self.report_error('Cannot write subtitles file ' + descfn) | ||||
|                     return | ||||
|  | ||||
|         if self.params.get('writeinfojson', False): | ||||
|             infofn = os.path.splitext(filename)[0] + u'.info.json' | ||||
|             infofn = os.path.splitext(filename)[0] + '.info.json' | ||||
|             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)): | ||||
|                 self.to_screen(u'[info] Video description metadata is already present') | ||||
|                 self.to_screen('[info] Video description metadata is already present') | ||||
|             else: | ||||
|                 self.to_screen(u'[info] Writing video description metadata as JSON to: ' + infofn) | ||||
|                 self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn) | ||||
|                 try: | ||||
|                     json_info_dict = dict((k, v) for k, v in info_dict.items() if not k in ['urlhandle']) | ||||
|                     write_json_file(json_info_dict, encodeFilename(infofn)) | ||||
|                     write_json_file(info_dict, encodeFilename(infofn)) | ||||
|                 except (OSError, IOError): | ||||
|                     self.report_error(u'Cannot write metadata to JSON file ' + infofn) | ||||
|                     self.report_error('Cannot write metadata to JSON file ' + infofn) | ||||
|                     return | ||||
|  | ||||
|         if self.params.get('writethumbnail', False): | ||||
|             if info_dict.get('thumbnail') is not None: | ||||
|                 thumb_format = determine_ext(info_dict['thumbnail'], u'jpg') | ||||
|                 thumb_filename = os.path.splitext(filename)[0] + u'.' + thumb_format | ||||
|                 thumb_format = determine_ext(info_dict['thumbnail'], 'jpg') | ||||
|                 thumb_filename = os.path.splitext(filename)[0] + '.' + thumb_format | ||||
|                 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)): | ||||
|                     self.to_screen(u'[%s] %s: Thumbnail is already present' % | ||||
|                     self.to_screen('[%s] %s: Thumbnail is already present' % | ||||
|                                    (info_dict['extractor'], info_dict['id'])) | ||||
|                 else: | ||||
|                     self.to_screen(u'[%s] %s: Downloading thumbnail ...' % | ||||
|                     self.to_screen('[%s] %s: Downloading thumbnail ...' % | ||||
|                                    (info_dict['extractor'], info_dict['id'])) | ||||
|                     try: | ||||
|                         uf = compat_urllib_request.urlopen(info_dict['thumbnail']) | ||||
|                         uf = self.urlopen(info_dict['thumbnail']) | ||||
|                         with open(thumb_filename, 'wb') as thumbf: | ||||
|                             shutil.copyfileobj(uf, thumbf) | ||||
|                         self.to_screen(u'[%s] %s: Writing thumbnail to: %s' % | ||||
|                         self.to_screen('[%s] %s: Writing thumbnail to: %s' % | ||||
|                             (info_dict['extractor'], info_dict['id'], thumb_filename)) | ||||
|                     except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: | ||||
|                         self.report_warning(u'Unable to download thumbnail "%s": %s' % | ||||
|                         self.report_warning('Unable to download thumbnail "%s": %s' % | ||||
|                             (info_dict['thumbnail'], compat_str(err))) | ||||
|  | ||||
|         if not self.params.get('skip_download', False): | ||||
| @@ -880,21 +958,49 @@ class YoutubeDL(object): | ||||
|                 success = True | ||||
|             else: | ||||
|                 try: | ||||
|                     success = self.fd._do_download(filename, info_dict) | ||||
|                     def dl(name, info): | ||||
|                         fd = get_suitable_downloader(info)(self, self.params) | ||||
|                         for ph in self._progress_hooks: | ||||
|                             fd.add_progress_hook(ph) | ||||
|                         return fd.download(name, info) | ||||
|                     if info_dict.get('requested_formats') is not None: | ||||
|                         downloaded = [] | ||||
|                         success = True | ||||
|                         merger = FFmpegMergerPP(self) | ||||
|                         if not merger._get_executable(): | ||||
|                             postprocessors = [] | ||||
|                             self.report_warning('You have requested multiple ' | ||||
|                                 'formats but ffmpeg or avconv are not installed.' | ||||
|                                 ' The formats won\'t be merged') | ||||
|                         else: | ||||
|                             postprocessors = [merger] | ||||
|                         for f in info_dict['requested_formats']: | ||||
|                             new_info = dict(info_dict) | ||||
|                             new_info.update(f) | ||||
|                             fname = self.prepare_filename(new_info) | ||||
|                             fname = prepend_extension(fname, 'f%s' % f['format_id']) | ||||
|                             downloaded.append(fname) | ||||
|                             partial_success = dl(fname, new_info) | ||||
|                             success = success and partial_success | ||||
|                         info_dict['__postprocessors'] = postprocessors | ||||
|                         info_dict['__files_to_merge'] = downloaded | ||||
|                     else: | ||||
|                         # Just a single file | ||||
|                         success = dl(filename, info_dict) | ||||
|                 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: | ||||
|                     self.report_error(u'unable to download video data: %s' % str(err)) | ||||
|                     self.report_error('unable to download video data: %s' % str(err)) | ||||
|                     return | ||||
|                 except (OSError, IOError) as err: | ||||
|                     raise UnavailableVideoError(err) | ||||
|                 except (ContentTooShortError, ) as err: | ||||
|                     self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) | ||||
|                     self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) | ||||
|                     return | ||||
|  | ||||
|             if success: | ||||
|                 try: | ||||
|                     self.post_process(filename, info_dict) | ||||
|                 except (PostProcessingError) as err: | ||||
|                     self.report_error(u'postprocessing: %s' % str(err)) | ||||
|                     self.report_error('postprocessing: %s' % str(err)) | ||||
|                     return | ||||
|  | ||||
|         self.record_download_archive(info_dict) | ||||
| @@ -911,9 +1017,9 @@ class YoutubeDL(object): | ||||
|                 #It also downloads the videos | ||||
|                 self.extract_info(url) | ||||
|             except UnavailableVideoError: | ||||
|                 self.report_error(u'unable to download video') | ||||
|                 self.report_error('unable to download video') | ||||
|             except MaxDownloadsReached: | ||||
|                 self.to_screen(u'[info] Maximum number of downloaded files reached.') | ||||
|                 self.to_screen('[info] Maximum number of downloaded files reached.') | ||||
|                 raise | ||||
|  | ||||
|         return self._download_retcode | ||||
| @@ -926,7 +1032,7 @@ class YoutubeDL(object): | ||||
|         except DownloadError: | ||||
|             webpage_url = info.get('webpage_url') | ||||
|             if webpage_url is not None: | ||||
|                 self.report_warning(u'The info failed to download, trying with "%s"' % webpage_url) | ||||
|                 self.report_warning('The info failed to download, trying with "%s"' % webpage_url) | ||||
|                 return self.download([webpage_url]) | ||||
|             else: | ||||
|                 raise | ||||
| @@ -937,7 +1043,11 @@ class YoutubeDL(object): | ||||
|         info = dict(ie_info) | ||||
|         info['filepath'] = filename | ||||
|         keep_video = None | ||||
|         for pp in self._pps: | ||||
|         pps_chain = [] | ||||
|         if ie_info.get('__postprocessors') is not None: | ||||
|             pps_chain.extend(ie_info['__postprocessors']) | ||||
|         pps_chain.extend(self._pps) | ||||
|         for pp in pps_chain: | ||||
|             try: | ||||
|                 keep_video_wish, new_info = pp.run(info) | ||||
|                 if keep_video_wish is not None: | ||||
| @@ -950,10 +1060,10 @@ class YoutubeDL(object): | ||||
|                 self.report_error(e.msg) | ||||
|         if keep_video is False and not self.params.get('keepvideo', False): | ||||
|             try: | ||||
|                 self.to_screen(u'Deleting original file %s (pass -k to keep)' % filename) | ||||
|                 self.to_screen('Deleting original file %s (pass -k to keep)' % filename) | ||||
|                 os.remove(encodeFilename(filename)) | ||||
|             except (IOError, OSError): | ||||
|                 self.report_warning(u'Unable to remove downloaded video file') | ||||
|                 self.report_warning('Unable to remove downloaded video file') | ||||
|  | ||||
|     def _make_archive_id(self, info_dict): | ||||
|         # Future-proof against any change in case | ||||
| @@ -964,7 +1074,7 @@ class YoutubeDL(object): | ||||
|                 extractor = info_dict.get('ie_key')  # key in a playlist | ||||
|         if extractor is None: | ||||
|             return None  # Incomplete video information | ||||
|         return extractor.lower() + u' ' + info_dict['id'] | ||||
|         return extractor.lower() + ' ' + info_dict['id'] | ||||
|  | ||||
|     def in_download_archive(self, info_dict): | ||||
|         fn = self.params.get('download_archive') | ||||
| @@ -992,53 +1102,72 @@ class YoutubeDL(object): | ||||
|         vid_id = self._make_archive_id(info_dict) | ||||
|         assert vid_id | ||||
|         with locked_file(fn, 'a', encoding='utf-8') as archive_file: | ||||
|             archive_file.write(vid_id + u'\n') | ||||
|             archive_file.write(vid_id + '\n') | ||||
|  | ||||
|     @staticmethod | ||||
|     def format_resolution(format, default='unknown'): | ||||
|         if format.get('vcodec') == 'none': | ||||
|             return 'audio only' | ||||
|         if format.get('_resolution') is not None: | ||||
|             return format['_resolution'] | ||||
|         if format.get('resolution') is not None: | ||||
|             return format['resolution'] | ||||
|         if format.get('height') is not None: | ||||
|             if format.get('width') is not None: | ||||
|                 res = u'%sx%s' % (format['width'], format['height']) | ||||
|                 res = '%sx%s' % (format['width'], format['height']) | ||||
|             else: | ||||
|                 res = u'%sp' % format['height'] | ||||
|                 res = '%sp' % format['height'] | ||||
|         elif format.get('width') is not None: | ||||
|             res = '?x%d' % format['width'] | ||||
|         else: | ||||
|             res = default | ||||
|         return res | ||||
|  | ||||
|     def list_formats(self, info_dict): | ||||
|         def format_note(fdict): | ||||
|             res = u'' | ||||
|             res = '' | ||||
|             if fdict.get('ext') in ['f4f', 'f4m']: | ||||
|                 res += '(unsupported) ' | ||||
|             if fdict.get('format_note') is not None: | ||||
|                 res += fdict['format_note'] + u' ' | ||||
|                 res += fdict['format_note'] + ' ' | ||||
|             if fdict.get('tbr') is not None: | ||||
|                 res += '%4dk ' % fdict['tbr'] | ||||
|             if fdict.get('container') is not None: | ||||
|                 if res: | ||||
|                     res += ', ' | ||||
|                 res += '%s container' % fdict['container'] | ||||
|             if (fdict.get('vcodec') is not None and | ||||
|                     fdict.get('vcodec') != 'none'): | ||||
|                 res += u'%-5s' % fdict['vcodec'] | ||||
|             elif fdict.get('vbr') is not None: | ||||
|                 res += u'video' | ||||
|                 if res: | ||||
|                     res += ', ' | ||||
|                 res += fdict['vcodec'] | ||||
|                 if fdict.get('vbr') is not None: | ||||
|                     res += '@' | ||||
|             elif fdict.get('vbr') is not None and fdict.get('abr') is not None: | ||||
|                 res += 'video@' | ||||
|             if fdict.get('vbr') is not None: | ||||
|                 res += u'@%4dk' % fdict['vbr'] | ||||
|                 res += '%4dk' % fdict['vbr'] | ||||
|             if fdict.get('acodec') is not None: | ||||
|                 if res: | ||||
|                     res += u', ' | ||||
|                 res += u'%-5s' % fdict['acodec'] | ||||
|                     res += ', ' | ||||
|                 if fdict['acodec'] == 'none': | ||||
|                     res += 'video only' | ||||
|                 else: | ||||
|                     res += '%-5s' % fdict['acodec'] | ||||
|             elif fdict.get('abr') is not None: | ||||
|                 if res: | ||||
|                     res += u', ' | ||||
|                     res += ', ' | ||||
|                 res += 'audio' | ||||
|             if fdict.get('abr') is not None: | ||||
|                 res += u'@%3dk' % fdict['abr'] | ||||
|                 res += '@%3dk' % fdict['abr'] | ||||
|             if fdict.get('asr') is not None: | ||||
|                 res += ' (%5dHz)' % fdict['asr'] | ||||
|             if fdict.get('filesize') is not None: | ||||
|                 if res: | ||||
|                     res += u', ' | ||||
|                     res += ', ' | ||||
|                 res += format_bytes(fdict['filesize']) | ||||
|             return res | ||||
|  | ||||
|         def line(format, idlen=20): | ||||
|             return ((u'%-' + compat_str(idlen + 1) + u's%-10s%-12s%s') % ( | ||||
|             return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % ( | ||||
|                 format['format_id'], | ||||
|                 format['ext'], | ||||
|                 self.format_resolution(format), | ||||
| @@ -1046,7 +1175,7 @@ class YoutubeDL(object): | ||||
|             )) | ||||
|  | ||||
|         formats = info_dict.get('formats', [info_dict]) | ||||
|         idlen = max(len(u'format code'), | ||||
|         idlen = max(len('format code'), | ||||
|                     max(len(f['format_id']) for f in formats)) | ||||
|         formats_s = [line(f, idlen) for f in formats] | ||||
|         if len(formats) > 1: | ||||
| @@ -1054,19 +1183,19 @@ class YoutubeDL(object): | ||||
|             formats_s[-1] += (' ' if format_note(formats[-1]) else '') + '(best)' | ||||
|  | ||||
|         header_line = line({ | ||||
|             'format_id': u'format code', 'ext': u'extension', | ||||
|             '_resolution': u'resolution', 'format_note': u'note'}, idlen=idlen) | ||||
|         self.to_screen(u'[info] Available formats for %s:\n%s\n%s' % | ||||
|                        (info_dict['id'], header_line, u"\n".join(formats_s))) | ||||
|             'format_id': 'format code', 'ext': 'extension', | ||||
|             'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen) | ||||
|         self.to_screen('[info] Available formats for %s:\n%s\n%s' % | ||||
|                        (info_dict['id'], header_line, '\n'.join(formats_s))) | ||||
|  | ||||
|     def urlopen(self, req): | ||||
|         """ Start an HTTP download """ | ||||
|         return self._opener.open(req) | ||||
|         return self._opener.open(req, timeout=self._socket_timeout) | ||||
|  | ||||
|     def print_debug_header(self): | ||||
|         if not self.params.get('verbose'): | ||||
|             return | ||||
|         write_string(u'[debug] youtube-dl version ' + __version__ + u'\n') | ||||
|         write_string('[debug] youtube-dl version ' + __version__ + '\n') | ||||
|         try: | ||||
|             sp = subprocess.Popen( | ||||
|                 ['git', 'rev-parse', '--short', 'HEAD'], | ||||
| @@ -1075,24 +1204,24 @@ class YoutubeDL(object): | ||||
|             out, err = sp.communicate() | ||||
|             out = out.decode().strip() | ||||
|             if re.match('[0-9a-f]+', out): | ||||
|                 write_string(u'[debug] Git HEAD: ' + out + u'\n') | ||||
|                 write_string('[debug] Git HEAD: ' + out + '\n') | ||||
|         except: | ||||
|             try: | ||||
|                 sys.exc_clear() | ||||
|             except: | ||||
|                 pass | ||||
|         write_string(u'[debug] Python version %s - %s' % | ||||
|                      (platform.python_version(), platform_name()) + u'\n') | ||||
|         write_string('[debug] Python version %s - %s' % | ||||
|                      (platform.python_version(), platform_name()) + '\n') | ||||
|  | ||||
|         proxy_map = {} | ||||
|         for handler in self._opener.handlers: | ||||
|             if hasattr(handler, 'proxies'): | ||||
|                 proxy_map.update(handler.proxies) | ||||
|         write_string(u'[debug] Proxy map: ' + compat_str(proxy_map) + u'\n') | ||||
|         write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n') | ||||
|  | ||||
|     def _setup_opener(self): | ||||
|         timeout_val = self.params.get('socket_timeout') | ||||
|         timeout = 600 if timeout_val is None else float(timeout_val) | ||||
|         self._socket_timeout = 600 if timeout_val is None else float(timeout_val) | ||||
|  | ||||
|         opts_cookiefile = self.params.get('cookiefile') | ||||
|         opts_proxy = self.params.get('proxy') | ||||
| @@ -1118,16 +1247,15 @@ class YoutubeDL(object): | ||||
|             if 'http' in proxies and 'https' not in proxies: | ||||
|                 proxies['https'] = proxies['http'] | ||||
|         proxy_handler = compat_urllib_request.ProxyHandler(proxies) | ||||
|  | ||||
|         debuglevel = 1 if self.params.get('debug_printtraffic') else 0 | ||||
|         https_handler = make_HTTPS_handler( | ||||
|             self.params.get('nocheckcertificate', False)) | ||||
|             self.params.get('nocheckcertificate', False), debuglevel=debuglevel) | ||||
|         ydlh = YoutubeDLHandler(debuglevel=debuglevel) | ||||
|         opener = compat_urllib_request.build_opener( | ||||
|             https_handler, proxy_handler, cookie_processor, YoutubeDLHandler()) | ||||
|             https_handler, proxy_handler, cookie_processor, ydlh) | ||||
|         # Delete the default user-agent header, which would otherwise apply in | ||||
|         # cases where our custom HTTP handler doesn't come into play | ||||
|         # (See https://github.com/rg3/youtube-dl/issues/1309 for details) | ||||
|         opener.addheaders = [] | ||||
|         self._opener = opener | ||||
|  | ||||
|         # TODO remove this global modification | ||||
|         compat_urllib_request.install_opener(opener) | ||||
|         socket.setdefaulttimeout(timeout) | ||||
|   | ||||
| @@ -38,12 +38,26 @@ __authors__  = ( | ||||
|     'Takuya Tsuchida', | ||||
|     'Sergey M.', | ||||
|     'Michael Orlitzky', | ||||
|     'Chris Gahan', | ||||
|     'Saimadhav Heblikar', | ||||
|     'Mike Col', | ||||
|     'Oleg Prutz', | ||||
|     'pulpe', | ||||
|     'Andreas Schmitz', | ||||
|     'Michael Kaiser', | ||||
|     'Niklas Laxström', | ||||
|     'David Triendl', | ||||
|     'Anthony Weems', | ||||
|     'David Wagner', | ||||
|     'Juan C. Olivares', | ||||
|     'Mattias Harrysson', | ||||
| ) | ||||
|  | ||||
| __license__ = 'Public Domain' | ||||
|  | ||||
| import codecs | ||||
| import getpass | ||||
| import io | ||||
| import locale | ||||
| import optparse | ||||
| import os | ||||
| import random | ||||
| @@ -53,6 +67,7 @@ import sys | ||||
|  | ||||
|  | ||||
| from .utils import ( | ||||
|     compat_getpass, | ||||
|     compat_print, | ||||
|     DateRange, | ||||
|     decodeOption, | ||||
| @@ -61,6 +76,7 @@ from .utils import ( | ||||
|     get_cachedir, | ||||
|     MaxDownloadsReached, | ||||
|     preferredencoding, | ||||
|     read_batch_urls, | ||||
|     SameFileError, | ||||
|     setproctitle, | ||||
|     std_headers, | ||||
| @@ -73,11 +89,12 @@ from .FileDownloader import ( | ||||
| from .extractor import gen_extractors | ||||
| from .version import __version__ | ||||
| from .YoutubeDL import YoutubeDL | ||||
| from .PostProcessor import ( | ||||
| from .postprocessor import ( | ||||
|     FFmpegMetadataPP, | ||||
|     FFmpegVideoConvertor, | ||||
|     FFmpegExtractAudioPP, | ||||
|     FFmpegEmbedSubtitlePP, | ||||
|     XAttrMetadataPP, | ||||
| ) | ||||
|  | ||||
|  | ||||
| @@ -95,6 +112,43 @@ def parseOpts(overrideArguments=None): | ||||
|             optionf.close() | ||||
|         return res | ||||
|  | ||||
|     def _readUserConf(): | ||||
|         xdg_config_home = os.environ.get('XDG_CONFIG_HOME') | ||||
|         if xdg_config_home: | ||||
|             userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config') | ||||
|             if not os.path.isfile(userConfFile): | ||||
|                 userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf') | ||||
|         else: | ||||
|             userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config') | ||||
|             if not os.path.isfile(userConfFile): | ||||
|                 userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf') | ||||
|         userConf = _readOptions(userConfFile, None) | ||||
|  | ||||
|         if userConf is None: | ||||
|             appdata_dir = os.environ.get('appdata') | ||||
|             if appdata_dir: | ||||
|                 userConf = _readOptions( | ||||
|                     os.path.join(appdata_dir, 'youtube-dl', 'config'), | ||||
|                     default=None) | ||||
|                 if userConf is None: | ||||
|                     userConf = _readOptions( | ||||
|                         os.path.join(appdata_dir, 'youtube-dl', 'config.txt'), | ||||
|                         default=None) | ||||
|  | ||||
|         if userConf is None: | ||||
|             userConf = _readOptions( | ||||
|                 os.path.join(os.path.expanduser('~'), 'youtube-dl.conf'), | ||||
|                 default=None) | ||||
|         if userConf is None: | ||||
|             userConf = _readOptions( | ||||
|                 os.path.join(os.path.expanduser('~'), 'youtube-dl.conf.txt'), | ||||
|                 default=None) | ||||
|  | ||||
|         if userConf is None: | ||||
|             userConf = [] | ||||
|  | ||||
|         return userConf | ||||
|  | ||||
|     def _format_option_string(option): | ||||
|         ''' ('-o', '--option') -> -o, --format METAVAR''' | ||||
|  | ||||
| @@ -161,7 +215,7 @@ def parseOpts(overrideArguments=None): | ||||
|     general.add_option('-U', '--update', | ||||
|             action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)') | ||||
|     general.add_option('-i', '--ignore-errors', | ||||
|             action='store_true', dest='ignoreerrors', help='continue on download errors, for example to to skip unavailable videos in a playlist', default=False) | ||||
|             action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False) | ||||
|     general.add_option('--abort-on-error', | ||||
|             action='store_false', dest='ignoreerrors', | ||||
|             help='Abort downloading of further videos (in the playlist or the command line) if an error occurs') | ||||
| @@ -183,19 +237,28 @@ def parseOpts(overrideArguments=None): | ||||
|         '--proxy', dest='proxy', default=None, metavar='URL', | ||||
|         help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection') | ||||
|     general.add_option('--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation.') | ||||
|     general.add_option( | ||||
|         '--prefer-insecure', action='store_true', dest='prefer_insecure', | ||||
|         help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)') | ||||
|     general.add_option( | ||||
|         '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR', | ||||
|         help='Location in the filesystem where youtube-dl can store downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl .') | ||||
|         help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.') | ||||
|     general.add_option( | ||||
|         '--no-cache-dir', action='store_const', const=None, dest='cachedir', | ||||
|         help='Disable filesystem caching') | ||||
|     general.add_option( | ||||
|         '--socket-timeout', dest='socket_timeout', | ||||
|         type=float, default=None, help=optparse.SUPPRESS_HELP) | ||||
|         type=float, default=None, help=u'Time to wait before giving up, in seconds') | ||||
|     general.add_option( | ||||
|         '--bidi-workaround', dest='bidi_workaround', action='store_true', | ||||
|         help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH') | ||||
|  | ||||
|     general.add_option('--default-search', | ||||
|             dest='default_search', metavar='PREFIX', | ||||
|             help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for  youtube-dl "large apple". By default (with value "auto") youtube-dl guesses.') | ||||
|     general.add_option( | ||||
|         '--ignore-config', | ||||
|         action='store_true', | ||||
|         help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)') | ||||
|  | ||||
|     selection.add_option( | ||||
|         '--playlist-start', | ||||
| @@ -213,8 +276,12 @@ def parseOpts(overrideArguments=None): | ||||
|     selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None) | ||||
|     selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None) | ||||
|     selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None) | ||||
|     selection.add_option('--datebefore', metavar='DATE', dest='datebefore', help='download only videos uploaded before this date', default=None) | ||||
|     selection.add_option('--dateafter', metavar='DATE', dest='dateafter', help='download only videos uploaded after this date', default=None) | ||||
|     selection.add_option( | ||||
|         '--datebefore', metavar='DATE', dest='datebefore', default=None, | ||||
|         help='download only videos uploaded on or before this date (i.e. inclusive)') | ||||
|     selection.add_option( | ||||
|         '--dateafter', metavar='DATE', dest='dateafter', default=None, | ||||
|         help='download only videos uploaded on or after this date (i.e. inclusive)') | ||||
|     selection.add_option( | ||||
|         '--min-views', metavar='COUNT', dest='min_views', | ||||
|         default=None, type=int, | ||||
| @@ -230,7 +297,14 @@ def parseOpts(overrideArguments=None): | ||||
|     selection.add_option('--download-archive', metavar='FILE', | ||||
|                          dest='download_archive', | ||||
|                          help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.') | ||||
|  | ||||
|     selection.add_option( | ||||
|         '--include-ads', dest='include_ads', | ||||
|         action='store_true', | ||||
|         help='Download advertisements as well (experimental)') | ||||
|     selection.add_option( | ||||
|         '--youtube-include-dash-manifest', action='store_true', | ||||
|         dest='youtube_include_dash_manifest', default=False, | ||||
|         help='Try to download the DASH manifest on YouTube videos (experimental)') | ||||
|  | ||||
|     authentication.add_option('-u', '--username', | ||||
|             dest='username', metavar='USERNAME', help='account username') | ||||
| @@ -239,12 +313,12 @@ def parseOpts(overrideArguments=None): | ||||
|     authentication.add_option('-n', '--netrc', | ||||
|             action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False) | ||||
|     authentication.add_option('--video-password', | ||||
|             dest='videopassword', metavar='PASSWORD', help='video password (vimeo only)') | ||||
|             dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)') | ||||
|  | ||||
|  | ||||
|     video_format.add_option('-f', '--format', | ||||
|             action='store', dest='format', metavar='FORMAT', default='best', | ||||
|             help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported') | ||||
|             action='store', dest='format', metavar='FORMAT', default=None, | ||||
|             help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality.') | ||||
|     video_format.add_option('--all-formats', | ||||
|             action='store_const', dest='format', help='download all available video formats', const='all') | ||||
|     video_format.add_option('--prefer-free-formats', | ||||
| @@ -252,7 +326,7 @@ def parseOpts(overrideArguments=None): | ||||
|     video_format.add_option('--max-quality', | ||||
|             action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download') | ||||
|     video_format.add_option('-F', '--list-formats', | ||||
|             action='store_true', dest='listformats', help='list all available formats (currently youtube only)') | ||||
|             action='store_true', dest='listformats', help='list all available formats') | ||||
|  | ||||
|     subtitles.add_option('--write-sub', '--write-srt', | ||||
|             action='store_true', dest='writesubtitles', | ||||
| @@ -326,13 +400,16 @@ def parseOpts(overrideArguments=None): | ||||
|             action='store_true', dest='verbose', help='print various debugging information', default=False) | ||||
|     verbosity.add_option('--dump-intermediate-pages', | ||||
|             action='store_true', dest='dump_intermediate_pages', default=False, | ||||
|             help='print downloaded pages to debug problems(very verbose)') | ||||
|             help='print downloaded pages to debug problems (very verbose)') | ||||
|     verbosity.add_option('--write-pages', | ||||
|             action='store_true', dest='write_pages', default=False, | ||||
|             help='Write downloaded intermediary pages to files in the current directory to debug problems') | ||||
|     verbosity.add_option('--youtube-print-sig-code', | ||||
|             action='store_true', dest='youtube_print_sig_code', default=False, | ||||
|             help=optparse.SUPPRESS_HELP) | ||||
|     verbosity.add_option('--print-traffic', | ||||
|             dest='debug_printtraffic', action='store_true', default=False, | ||||
|             help='Display sent and read HTTP traffic') | ||||
|  | ||||
|  | ||||
|     filesystem.add_option('-t', '--title', | ||||
| @@ -350,12 +427,14 @@ def parseOpts(overrideArguments=None): | ||||
|                   '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, ' | ||||
|                   '%(autonumber)s to get an automatically incremented number, ' | ||||
|                   '%(ext)s for the filename extension, ' | ||||
|                   '%(format)s for the format description (like "22 - 1280x720" or "HD"),' | ||||
|                   '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"),' | ||||
|                   '%(format)s for the format description (like "22 - 1280x720" or "HD"), ' | ||||
|                   '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), ' | ||||
|                   '%(upload_date)s for the upload date (YYYYMMDD), ' | ||||
|                   '%(extractor)s for the provider (youtube, metacafe, etc), ' | ||||
|                   '%(id)s for the video id , %(playlist)s for the playlist the video is in, ' | ||||
|                   '%(id)s for the video id, %(playlist)s for the playlist the video is in, ' | ||||
|                   '%(playlist_index)s for the position in the playlist and %% for a literal percent. ' | ||||
|                   '%(height)s and %(width)s for the width and height of the video format. ' | ||||
|                   '%(resolution)s for a textual description of the resolution of the video format. ' | ||||
|                   'Use - to output to stdout. Can also be used to download to a different directory, ' | ||||
|                   'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .')) | ||||
|     filesystem.add_option('--autonumber-size', | ||||
| @@ -368,7 +447,7 @@ def parseOpts(overrideArguments=None): | ||||
|             dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)') | ||||
|     filesystem.add_option('--load-info', | ||||
|             dest='load_info_filename', metavar='FILE', | ||||
|             help='json file containing the video information (created with the "--write-json" option') | ||||
|             help='json file containing the video information (created with the "--write-json" option)') | ||||
|     filesystem.add_option('-w', '--no-overwrites', | ||||
|             action='store_true', dest='nooverwrites', help='do not overwrite files', default=False) | ||||
|     filesystem.add_option('-c', '--continue', | ||||
| @@ -412,7 +491,13 @@ def parseOpts(overrideArguments=None): | ||||
|     postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False, | ||||
|             help='embed subtitles in the video (only for mp4 videos)') | ||||
|     postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False, | ||||
|             help='add metadata to the files') | ||||
|             help='write metadata to the video file') | ||||
|     postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False, | ||||
|             help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)') | ||||
|     postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg', | ||||
|         help='Prefer avconv over ffmpeg for running the postprocessors (default)') | ||||
|     postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg', | ||||
|         help='Prefer ffmpeg over avconv for running the postprocessors') | ||||
|  | ||||
|  | ||||
|     parser.add_option_group(general) | ||||
| @@ -430,49 +515,25 @@ def parseOpts(overrideArguments=None): | ||||
|         if opts.verbose: | ||||
|             write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n') | ||||
|     else: | ||||
|         systemConf = _readOptions('/etc/youtube-dl.conf') | ||||
|  | ||||
|         xdg_config_home = os.environ.get('XDG_CONFIG_HOME') | ||||
|         if xdg_config_home: | ||||
|             userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config') | ||||
|             if not os.path.isfile(userConfFile): | ||||
|                 userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf') | ||||
|         else: | ||||
|             userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config') | ||||
|             if not os.path.isfile(userConfFile): | ||||
|                 userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf') | ||||
|         userConf = _readOptions(userConfFile, None) | ||||
|  | ||||
|         if userConf is None: | ||||
|             appdata_dir = os.environ.get('appdata') | ||||
|             if appdata_dir: | ||||
|                 userConf = _readOptions( | ||||
|                     os.path.join(appdata_dir, 'youtube-dl', 'config'), | ||||
|                     default=None) | ||||
|                 if userConf is None: | ||||
|                     userConf = _readOptions( | ||||
|                         os.path.join(appdata_dir, 'youtube-dl', 'config.txt'), | ||||
|                         default=None) | ||||
|  | ||||
|         if userConf is None: | ||||
|             userConf = _readOptions( | ||||
|                 os.path.join(os.path.expanduser('~'), 'youtube-dl.conf'), | ||||
|                 default=None) | ||||
|         if userConf is None: | ||||
|             userConf = _readOptions( | ||||
|                 os.path.join(os.path.expanduser('~'), 'youtube-dl.conf.txt'), | ||||
|                 default=None) | ||||
|  | ||||
|         if userConf is None: | ||||
|             userConf = [] | ||||
|  | ||||
|         commandLineConf = sys.argv[1:] | ||||
|         if '--ignore-config' in commandLineConf: | ||||
|             systemConf = [] | ||||
|             userConf = [] | ||||
|         else: | ||||
|             systemConf = _readOptions('/etc/youtube-dl.conf') | ||||
|             if '--ignore-config' in systemConf: | ||||
|                 userConf = [] | ||||
|             else: | ||||
|                 userConf = _readUserConf() | ||||
|         argv = systemConf + userConf + commandLineConf | ||||
|  | ||||
|         opts, args = parser.parse_args(argv) | ||||
|         if opts.verbose: | ||||
|             write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n') | ||||
|             write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n') | ||||
|             write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n') | ||||
|             write_string(u'[debug] Encodings: locale %r, fs %r, out %r, pref: %r\n' % | ||||
|                          (locale.getpreferredencoding(), sys.getfilesystemencoding(), sys.stdout.encoding, preferredencoding())) | ||||
|  | ||||
|     return parser, opts, args | ||||
|  | ||||
| @@ -501,22 +562,22 @@ def _real_main(argv=None): | ||||
|         sys.exit(0) | ||||
|  | ||||
|     # Batch file verification | ||||
|     batchurls = [] | ||||
|     batch_urls = [] | ||||
|     if opts.batchfile is not None: | ||||
|         try: | ||||
|             if opts.batchfile == '-': | ||||
|                 batchfd = sys.stdin | ||||
|             else: | ||||
|                 batchfd = open(opts.batchfile, 'r') | ||||
|             batchurls = batchfd.readlines() | ||||
|             batchurls = [x.strip() for x in batchurls] | ||||
|             batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)] | ||||
|                 batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore') | ||||
|             batch_urls = read_batch_urls(batchfd) | ||||
|             if opts.verbose: | ||||
|                 write_string(u'[debug] Batch file urls: ' + repr(batchurls) + u'\n') | ||||
|                 write_string(u'[debug] Batch file urls: ' + repr(batch_urls) + u'\n') | ||||
|         except IOError: | ||||
|             sys.exit(u'ERROR: batch file could not be read') | ||||
|     all_urls = batchurls + args | ||||
|     all_urls = batch_urls + args | ||||
|     all_urls = [url.strip() for url in all_urls] | ||||
|     _enc = preferredencoding() | ||||
|     all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls] | ||||
|  | ||||
|     extractors = gen_extractors() | ||||
|  | ||||
| @@ -546,13 +607,13 @@ def _real_main(argv=None): | ||||
|     if opts.usenetrc and (opts.username is not None or opts.password is not None): | ||||
|         parser.error(u'using .netrc conflicts with giving username/password') | ||||
|     if opts.password is not None and opts.username is None: | ||||
|         parser.error(u' account username missing\n') | ||||
|         parser.error(u'account username missing\n') | ||||
|     if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid): | ||||
|         parser.error(u'using output template conflicts with using title, video ID or auto number') | ||||
|     if opts.usetitle and opts.useid: | ||||
|         parser.error(u'using title conflicts with using video ID') | ||||
|     if opts.username is not None and opts.password is None: | ||||
|         opts.password = getpass.getpass(u'Type account password and press return:') | ||||
|         opts.password = compat_getpass(u'Type account password and press [Return]: ') | ||||
|     if opts.ratelimit is not None: | ||||
|         numeric_limit = FileDownloader.parse_bytes(opts.ratelimit) | ||||
|         if numeric_limit is None: | ||||
| @@ -596,6 +657,12 @@ def _real_main(argv=None): | ||||
|         date = DateRange.day(opts.date) | ||||
|     else: | ||||
|         date = DateRange(opts.dateafter, opts.datebefore) | ||||
|     if opts.default_search not in ('auto', None) and ':' not in opts.default_search: | ||||
|         parser.error(u'--default-search invalid; did you forget a colon (:) at the end?') | ||||
|  | ||||
|     # Do not download videos when there are audio-only formats | ||||
|     if opts.extractaudio and not opts.keepvideo and opts.format is None: | ||||
|         opts.format = 'bestaudio/best' | ||||
|  | ||||
|     # --all-sub automatically sets --write-sub if --write-auto-sub is not given | ||||
|     # this was the old behaviour if only --all-sub was given. | ||||
| @@ -620,6 +687,7 @@ def _real_main(argv=None): | ||||
|                      u' template'.format(outtmpl)) | ||||
|  | ||||
|     any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson | ||||
|     download_archive_fn = os.path.expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive | ||||
|  | ||||
|     ydl_opts = { | ||||
|         'usenetrc': opts.usenetrc, | ||||
| @@ -687,12 +755,18 @@ def _real_main(argv=None): | ||||
|         'cachedir': opts.cachedir, | ||||
|         'youtube_print_sig_code': opts.youtube_print_sig_code, | ||||
|         'age_limit': opts.age_limit, | ||||
|         'download_archive': opts.download_archive, | ||||
|         'download_archive': download_archive_fn, | ||||
|         'cookiefile': opts.cookiefile, | ||||
|         'nocheckcertificate': opts.no_check_certificate, | ||||
|         'prefer_insecure': opts.prefer_insecure, | ||||
|         'proxy': opts.proxy, | ||||
|         'socket_timeout': opts.socket_timeout, | ||||
|         'bidi_workaround': opts.bidi_workaround, | ||||
|         'debug_printtraffic': opts.debug_printtraffic, | ||||
|         'prefer_ffmpeg': opts.prefer_ffmpeg, | ||||
|         'include_ads': opts.include_ads, | ||||
|         'default_search': opts.default_search, | ||||
|         'youtube_include_dash_manifest': opts.youtube_include_dash_manifest, | ||||
|     } | ||||
|  | ||||
|     with YoutubeDL(ydl_opts) as ydl: | ||||
| @@ -709,6 +783,8 @@ def _real_main(argv=None): | ||||
|             ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo)) | ||||
|         if opts.embedsubtitles: | ||||
|             ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat)) | ||||
|         if opts.xattrs: | ||||
|             ydl.add_post_processor(XAttrMetadataPP()) | ||||
|  | ||||
|         # Update version | ||||
|         if opts.update_self: | ||||
|   | ||||
							
								
								
									
										29
									
								
								youtube_dl/downloader/__init__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								youtube_dl/downloader/__init__.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,29 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .common import FileDownloader | ||||
| from .hls import HlsFD | ||||
| from .http import HttpFD | ||||
| from .mplayer import MplayerFD | ||||
| from .rtmp import RtmpFD | ||||
| from .f4m import F4mFD | ||||
|  | ||||
| from ..utils import ( | ||||
|     determine_ext, | ||||
| ) | ||||
|  | ||||
|  | ||||
def get_suitable_downloader(info_dict):
    """Get the downloader class that can handle the info dict.

    Dispatch order matters: RTMP and explicit/implicit m3u8 take
    precedence over the generic extension checks, and plain HTTP is
    the fallback for everything else.
    """
    url = info_dict['url']
    protocol = info_dict.get('protocol')

    if url.startswith('rtmp'):
        return RtmpFD
    if (protocol == 'm3u8') or (protocol is None and determine_ext(url) == 'm3u8'):
        return HlsFD
    # str.startswith accepts a tuple of prefixes; one call instead of two.
    if url.startswith(('mms', 'rtsp')):
        return MplayerFD
    if determine_ext(url) == 'f4m':
        return F4mFD
    return HttpFD
							
								
								
									
										316
									
								
								youtube_dl/downloader/common.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										316
									
								
								youtube_dl/downloader/common.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,316 @@ | ||||
| import os | ||||
| import re | ||||
| import sys | ||||
| import time | ||||
|  | ||||
| from ..utils import ( | ||||
|     encodeFilename, | ||||
|     timeconvert, | ||||
|     format_bytes, | ||||
| ) | ||||
|  | ||||
|  | ||||
class FileDownloader(object):
    """File Downloader class.

    File downloader objects are the ones responsible of downloading the
    actual video file and writing it to disk.

    File downloaders accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead.

    Available options:

    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    ratelimit:         Download speed limit, in bytes/sec.
    retries:           Number of times to retry for HTTP error 5xx
    buffersize:        Size of download buffer in bytes.
    noresizebuffer:    Do not automatically resize the download buffer.
    continuedl:        Try to continue downloads if possible.
    noprogress:        Do not print the progress bar.
    logtostderr:       Log messages to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    nopart:            Do not use temporary .part files.
    updatetime:        Use the Last-modified header to set output file timestamps.
    test:              Download only first bytes to test the downloader.
    min_filesize:      Skip files smaller than this size
    max_filesize:      Skip files larger than this size

    Subclasses of this one must re-define the real_download method.
    """

    params = None

    def __init__(self, ydl, params):
        """Create a FileDownloader object with the given options.

        ydl is the owning YoutubeDL instance (used for all output and
        error reporting); params is the options dictionary described in
        the class docstring.
        """
        self.ydl = ydl
        self._progress_hooks = []
        self.params = params

    @staticmethod
    def format_seconds(seconds):
        """Format a duration in seconds as MM:SS or HH:MM:SS."""
        (mins, secs) = divmod(seconds, 60)
        (hours, mins) = divmod(mins, 60)
        # Durations over 99 hours do not fit the fixed-width display.
        if hours > 99:
            return '--:--:--'
        if hours == 0:
            return '%02d:%02d' % (mins, secs)
        else:
            return '%02d:%02d:%02d' % (hours, mins, secs)

    @staticmethod
    def calc_percent(byte_counter, data_len):
        """Return the completed percentage (0-100), or None if the total
        length is unknown."""
        if data_len is None:
            return None
        return float(byte_counter) / float(data_len) * 100.0

    @staticmethod
    def format_percent(percent):
        """Format a percentage for the progress line ('---.-%' if unknown)."""
        if percent is None:
            return '---.-%'
        return '%6s' % ('%3.1f%%' % percent)

    @staticmethod
    def calc_eta(start, now, total, current):
        """Estimate remaining download time in seconds, or None if it
        cannot be computed yet."""
        if total is None:
            return None
        dif = now - start
        if current == 0 or dif < 0.001:  # One millisecond
            return None
        rate = float(current) / dif
        return int((float(total) - float(current)) / rate)

    @staticmethod
    def format_eta(eta):
        """Format an ETA (seconds) for display ('--:--' if unknown)."""
        if eta is None:
            return '--:--'
        return FileDownloader.format_seconds(eta)

    @staticmethod
    def calc_speed(start, now, bytes):
        """Return the average speed in bytes/sec, or None if unknown."""
        dif = now - start
        if bytes == 0 or dif < 0.001:  # One millisecond
            return None
        return float(bytes) / dif

    @staticmethod
    def format_speed(speed):
        """Format a speed (bytes/sec) for display."""
        if speed is None:
            return '%10s' % '---b/s'
        return '%10s' % ('%s/s' % format_bytes(speed))

    @staticmethod
    def best_block_size(elapsed_time, bytes):
        """Pick the next read block size based on the observed rate,
        clamped between half and double the last block (max 4 MB)."""
        new_min = max(bytes / 2.0, 1.0)
        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
        if elapsed_time < 0.001:
            return int(new_max)
        rate = bytes / elapsed_time
        if rate > new_max:
            return int(new_max)
        if rate < new_min:
            return int(new_min)
        return int(rate)

    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into an integer.

        Accepts an optional one-letter suffix (k, M, G, ... case
        insensitive); returns None if the string is malformed.
        """
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
        if matchobj is None:
            return None
        number = float(matchobj.group(1))
        # An empty suffix yields index 0, i.e. a multiplier of 1.
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
        return int(round(number * multiplier))

    def to_screen(self, *args, **kargs):
        self.ydl.to_screen(*args, **kargs)

    def to_stderr(self, message):
        # Fixed: previously delegated to to_screen, sending error-level
        # messages to stdout instead of stderr.
        self.ydl.to_stderr(message)

    def to_console_title(self, message):
        self.ydl.to_console_title(message)

    def trouble(self, *args, **kargs):
        self.ydl.trouble(*args, **kargs)

    def report_warning(self, *args, **kargs):
        self.ydl.report_warning(*args, **kargs)

    def report_error(self, *args, **kargs):
        self.ydl.report_error(*args, **kargs)

    def slow_down(self, start_time, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit', None)
        if rate_limit is None or byte_counter == 0:
            return
        now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            # Sleep long enough that the average speed drops to the limit.
            time.sleep((byte_counter - rate_limit * elapsed) / rate_limit)

    def temp_name(self, filename):
        """Returns a temporary filename for the given filename."""
        # No .part file for stdout, when disabled, or when the target
        # exists but is not a regular file (e.g. a named pipe).
        if self.params.get('nopart', False) or filename == u'-' or \
                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
            return filename
        return filename + u'.part'

    def undo_temp_name(self, filename):
        """Strip the '.part' suffix added by temp_name, if present."""
        if filename.endswith(u'.part'):
            return filename[:-len(u'.part')]
        return filename

    def try_rename(self, old_filename, new_filename):
        """Rename the temporary file to its final name, reporting (but not
        raising) on failure."""
        try:
            if old_filename == new_filename:
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
        except (IOError, OSError) as err:
            self.report_error(u'unable to rename file: %s' % str(err))

    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file.

        Returns the parsed timestamp on success, None otherwise.
        """
        if last_modified_hdr is None:
            return
        if not os.path.isfile(encodeFilename(filename)):
            return
        filetime = timeconvert(last_modified_hdr)
        if filetime is None:
            return filetime
        # Ignore obviously invalid dates
        if filetime == 0:
            return
        try:
            os.utime(filename, (time.time(), filetime))
        except Exception:
            # Best effort only: failing to set the mtime is not fatal.
            pass
        return filetime

    def report_destination(self, filename):
        """Report destination filename."""
        self.to_screen(u'[download] Destination: ' + filename)

    def _report_progress_status(self, msg, is_last_line=False):
        """Print one progress line, rewriting the current console line
        unless progress_with_newline is set."""
        fullmsg = u'[download] ' + msg
        if self.params.get('progress_with_newline', False):
            self.to_screen(fullmsg)
        else:
            if os.name == 'nt':
                # cmd.exe has no ANSI clear-to-EOL; pad with spaces so a
                # shorter line fully overwrites the previous one.
                prev_len = getattr(self, '_report_progress_prev_line_length',
                                   0)
                if prev_len > len(fullmsg):
                    fullmsg += u' ' * (prev_len - len(fullmsg))
                self._report_progress_prev_line_length = len(fullmsg)
                clear_line = u'\r'
            else:
                clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
            self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
        self.to_console_title(u'youtube-dl ' + msg)

    def report_progress(self, percent, data_len_str, speed, eta):
        """Report download progress."""
        if self.params.get('noprogress', False):
            return
        if eta is not None:
            eta_str = self.format_eta(eta)
        else:
            eta_str = 'Unknown ETA'
        if percent is not None:
            percent_str = self.format_percent(percent)
        else:
            percent_str = 'Unknown %'
        speed_str = self.format_speed(speed)

        msg = (u'%s of %s at %s ETA %s' %
               (percent_str, data_len_str, speed_str, eta_str))
        self._report_progress_status(msg)

    def report_progress_live_stream(self, downloaded_data_len, speed, elapsed):
        """Report progress for a live stream (no known total size)."""
        if self.params.get('noprogress', False):
            return
        downloaded_str = format_bytes(downloaded_data_len)
        speed_str = self.format_speed(speed)
        elapsed_str = FileDownloader.format_seconds(elapsed)
        msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
        self._report_progress_status(msg)

    def report_finish(self, data_len_str, tot_time):
        """Report download finished."""
        if self.params.get('noprogress', False):
            self.to_screen(u'[download] Download completed')
        else:
            self._report_progress_status(
                (u'100%% of %s in %s' %
                 (data_len_str, self.format_seconds(tot_time))),
                is_last_line=True)

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)

    def report_retry(self, count, retries):
        """Report retry in case of HTTP error 5xx"""
        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen(u'[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            # The filename may not be representable in the console encoding.
            self.to_screen(u'[download] The file has already been downloaded')

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_screen(u'[download] Unable to resume')

    def download(self, filename, info_dict):
        """Download to a filename using the info from info_dict
        Return True on success and False otherwise
        """
        # Check file already present
        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
            self.report_file_already_downloaded(filename)
            self._hook_progress({
                'filename': filename,
                'status': 'finished',
                'total_bytes': os.path.getsize(encodeFilename(filename)),
            })
            return True

        return self.real_download(filename, info_dict)

    def real_download(self, filename, info_dict):
        """Real download process. Redefine in subclasses."""
        # Fixed typo in the error message ('sublcasses').
        raise NotImplementedError(u'This method must be implemented by subclasses')

    def _hook_progress(self, status):
        for ph in self._progress_hooks:
            ph(status)

    def add_progress_hook(self, ph):
        """ ph gets called on download progress, with a dictionary with the entries
        * filename: The final filename
        * status: One of "downloading" and "finished"

        It can also have some of the following entries:

        * downloaded_bytes: Bytes on disks
        * total_bytes: Total bytes, None if unknown
        * tmpfilename: The filename we're currently writing to
        * eta: The estimated time in seconds, None if unknown
        * speed: The download speed in bytes/second, None if unknown

        Hooks are guaranteed to be called at least once (with status "finished")
        if the download is successful.
        """
        self._progress_hooks.append(ph)
							
								
								
									
										314
									
								
								youtube_dl/downloader/f4m.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										314
									
								
								youtube_dl/downloader/f4m.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,314 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import base64 | ||||
| import io | ||||
| import itertools | ||||
| import os | ||||
| import time | ||||
| import xml.etree.ElementTree as etree | ||||
|  | ||||
| from .common import FileDownloader | ||||
| from .http import HttpFD | ||||
| from ..utils import ( | ||||
|     struct_pack, | ||||
|     struct_unpack, | ||||
|     compat_urlparse, | ||||
|     format_bytes, | ||||
|     encodeFilename, | ||||
|     sanitize_open, | ||||
| ) | ||||
|  | ||||
|  | ||||
class FlvReader(io.BytesIO):
    """
    Reader for Flv files
    The file format is documented in https://www.adobe.com/devnet/f4v.html

    All multi-byte integers are read big-endian ('!' struct format).
    """

    # Utility functions for reading numbers and strings
    def read_unsigned_long_long(self):
        # Big-endian 64-bit unsigned integer.
        return struct_unpack('!Q', self.read(8))[0]

    def read_unsigned_int(self):
        # Big-endian 32-bit unsigned integer.
        return struct_unpack('!I', self.read(4))[0]

    def read_unsigned_char(self):
        # Single unsigned byte.
        return struct_unpack('!B', self.read(1))[0]

    def read_string(self):
        """Read a NUL-terminated byte string; the terminator is consumed
        but not included in the result."""
        res = b''
        while True:
            char = self.read(1)
            if char == b'\x00':
                break
            res += char
        return res

    def read_box_info(self):
        """
        Read a box and return the info as a tuple: (box_size, box_type, box_data)
        """
        real_size = size = self.read_unsigned_int()
        box_type = self.read(4)
        header_end = 8
        # A declared size of 1 means the real size follows as a 64-bit field.
        if size == 1:
            real_size = self.read_unsigned_long_long()
            header_end = 16
        return real_size, box_type, self.read(real_size-header_end)

    def read_asrt(self):
        """Parse the body of a Segment Run Table ('asrt') box.

        Returns {'segment_run': [(first_segment, fragments_per_segment), ...]}.
        """
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        quality_entry_count = self.read_unsigned_char()
        # QualityEntryCount
        for i in range(quality_entry_count):
            self.read_string()

        segment_run_count = self.read_unsigned_int()
        segments = []
        for i in range(segment_run_count):
            first_segment = self.read_unsigned_int()
            fragments_per_segment = self.read_unsigned_int()
            segments.append((first_segment, fragments_per_segment))

        return {
            'segment_run': segments,
        }

    def read_afrt(self):
        """Parse the body of a Fragment Run Table ('afrt') box.

        Returns {'fragments': [{'first', 'ts', 'duration',
        'discontinuity_indicator'}, ...]}.
        """
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        # time scale
        self.read_unsigned_int()

        quality_entry_count = self.read_unsigned_char()
        # QualitySegmentUrlModifiers
        for i in range(quality_entry_count):
            self.read_string()

        fragments_count = self.read_unsigned_int()
        fragments = []
        for i in range(fragments_count):
            first = self.read_unsigned_int()
            first_ts = self.read_unsigned_long_long()
            duration = self.read_unsigned_int()
            # A zero-duration entry carries an extra discontinuity byte.
            if duration == 0:
                discontinuity_indicator = self.read_unsigned_char()
            else:
                discontinuity_indicator = None
            fragments.append({
                'first': first,
                'ts': first_ts,
                'duration': duration,
                'discontinuity_indicator': discontinuity_indicator,
            })

        return {
            'fragments': fragments,
        }

    def read_abst(self):
        """Parse the body of a Bootstrap Info ('abst') box.

        Skips the header/metadata fields and returns the nested segment
        ('asrt') and fragment ('afrt') run tables as
        {'segments': [...], 'fragments': [...]}.
        """
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)

        self.read_unsigned_int()  # BootstrapinfoVersion
        # Profile,Live,Update,Reserved
        self.read(1)
        # time scale
        self.read_unsigned_int()
        # CurrentMediaTime
        self.read_unsigned_long_long()
        # SmpteTimeCodeOffset
        self.read_unsigned_long_long()

        self.read_string()  # MovieIdentifier
        server_count = self.read_unsigned_char()
        # ServerEntryTable
        for i in range(server_count):
            self.read_string()
        quality_count = self.read_unsigned_char()
        # QualityEntryTable
        for i in range(quality_count):
            self.read_string()
        # DrmData
        self.read_string()
        # MetaData
        self.read_string()

        segments_count = self.read_unsigned_char()
        segments = []
        for i in range(segments_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'asrt'
            segment = FlvReader(box_data).read_asrt()
            segments.append(segment)
        fragments_run_count = self.read_unsigned_char()
        fragments = []
        for i in range(fragments_run_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'afrt'
            fragments.append(FlvReader(box_data).read_afrt())

        return {
            'segments': segments,
            'fragments': fragments,
        }

    def read_bootstrap_info(self):
        """Read the top-level 'abst' box and return its parsed contents."""
        total_size, box_type, box_data = self.read_box_info()
        assert box_type == b'abst'
        return FlvReader(box_data).read_abst()
|  | ||||
|  | ||||
def read_bootstrap_info(bootstrap_bytes):
    """Parse the bootstrap info ('abst' box) from raw bytes.

    Convenience wrapper around FlvReader.read_bootstrap_info.
    """
    return FlvReader(bootstrap_bytes).read_bootstrap_info()
|  | ||||
|  | ||||
def build_fragments_list(boot_info):
    """ Return a list of (segment, fragment) for each fragment in the video """
    # Only videos with a single segment run have been observed, hence the
    # hard-coded segment number 1 in every tuple.
    segment_run = boot_info['segments'][0]['segment_run'][0]
    n_frags = segment_run[1]
    fragment_runs = boot_info['fragments'][0]['fragments']
    first_frag = fragment_runs[0]['first']
    return [(1, first_frag + offset) for offset in range(n_frags)]
|  | ||||
|  | ||||
def write_flv_header(stream, metadata):
    """Writes the FLV header and the metadata to stream

    stream is a writable binary file-like object; metadata is the raw
    script-data payload (bytes) to embed after the header.
    """
    # FLV header
    stream.write(b'FLV\x01')
    # Flags byte: audio + video present.
    stream.write(b'\x05')
    stream.write(b'\x00\x00\x00\x09')
    # FLV File body
    stream.write(b'\x00\x00\x00\x00')
    # FLVTAG
    # Script data
    stream.write(b'\x12')
    # Size of the metadata with 3 bytes
    stream.write(struct_pack('!L', len(metadata))[1:])
    stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
    stream.write(metadata)
    # Magic numbers extracted from the output files produced by AdobeHDS.php
    #(https://github.com/K-S-V/Scripts)
    stream.write(b'\x00\x00\x01\x73')
|  | ||||
|  | ||||
| def _add_ns(prop): | ||||
|     return '{http://ns.adobe.com/f4m/1.0}%s' % prop | ||||
|  | ||||
|  | ||||
class HttpQuietDownloader(HttpFD):
    """An HttpFD that suppresses all screen output.

    Used for fetching individual fragments so that per-fragment progress
    does not clutter the overall download display.
    """
    def to_screen(self, *args, **kargs):
        # Intentionally a no-op: discard all progress/status messages.
        pass
|  | ||||
|  | ||||
| class F4mFD(FileDownloader): | ||||
|     """ | ||||
|     A downloader for f4m manifests or AdobeHDS. | ||||
|     """ | ||||
|  | ||||
|     def real_download(self, filename, info_dict): | ||||
|         man_url = info_dict['url'] | ||||
|         self.to_screen('[download] Downloading f4m manifest') | ||||
|         manifest = self.ydl.urlopen(man_url).read() | ||||
|         self.report_destination(filename) | ||||
|         http_dl = HttpQuietDownloader(self.ydl, | ||||
|             { | ||||
|                 'continuedl': True, | ||||
|                 'quiet': True, | ||||
|                 'noprogress': True, | ||||
|                 'test': self.params.get('test', False), | ||||
|             }) | ||||
|  | ||||
|         doc = etree.fromstring(manifest) | ||||
|         formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))] | ||||
|         formats = sorted(formats, key=lambda f: f[0]) | ||||
|         rate, media = formats[-1] | ||||
|         base_url = compat_urlparse.urljoin(man_url, media.attrib['url']) | ||||
|         bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text) | ||||
|         metadata = base64.b64decode(media.find(_add_ns('metadata')).text) | ||||
|         boot_info = read_bootstrap_info(bootstrap) | ||||
|         fragments_list = build_fragments_list(boot_info) | ||||
|         if self.params.get('test', False): | ||||
|             # We only download the first fragment | ||||
|             fragments_list = fragments_list[:1] | ||||
|         total_frags = len(fragments_list) | ||||
|  | ||||
|         tmpfilename = self.temp_name(filename) | ||||
|         (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb') | ||||
|         write_flv_header(dest_stream, metadata) | ||||
|  | ||||
|         # This dict stores the download progress, it's updated by the progress | ||||
|         # hook | ||||
|         state = { | ||||
|             'downloaded_bytes': 0, | ||||
|             'frag_counter': 0, | ||||
|         } | ||||
|         start = time.time() | ||||
|  | ||||
|         def frag_progress_hook(status): | ||||
|             frag_total_bytes = status.get('total_bytes', 0) | ||||
|             estimated_size = (state['downloaded_bytes'] + | ||||
|                 (total_frags - state['frag_counter']) * frag_total_bytes) | ||||
|             if status['status'] == 'finished': | ||||
|                 state['downloaded_bytes'] += frag_total_bytes | ||||
|                 state['frag_counter'] += 1 | ||||
|                 progress = self.calc_percent(state['frag_counter'], total_frags) | ||||
|                 byte_counter = state['downloaded_bytes'] | ||||
|             else: | ||||
|                 frag_downloaded_bytes = status['downloaded_bytes'] | ||||
|                 byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes | ||||
|                 frag_progress = self.calc_percent(frag_downloaded_bytes, | ||||
|                     frag_total_bytes) | ||||
|                 progress = self.calc_percent(state['frag_counter'], total_frags) | ||||
|                 progress += frag_progress / float(total_frags) | ||||
|  | ||||
|             eta = self.calc_eta(start, time.time(), estimated_size, byte_counter) | ||||
|             self.report_progress(progress, format_bytes(estimated_size), | ||||
|                 status.get('speed'), eta) | ||||
|         http_dl.add_progress_hook(frag_progress_hook) | ||||
|  | ||||
|         frags_filenames = [] | ||||
|         for (seg_i, frag_i) in fragments_list: | ||||
|             name = 'Seg%d-Frag%d' % (seg_i, frag_i) | ||||
|             url = base_url + name | ||||
|             frag_filename = '%s-%s' % (tmpfilename, name) | ||||
|             success = http_dl.download(frag_filename, {'url': url}) | ||||
|             if not success: | ||||
|                 return False | ||||
|             with open(frag_filename, 'rb') as down: | ||||
|                 down_data = down.read() | ||||
|                 reader = FlvReader(down_data) | ||||
|                 while True: | ||||
|                     _, box_type, box_data = reader.read_box_info() | ||||
|                     if box_type == b'mdat': | ||||
|                         dest_stream.write(box_data) | ||||
|                         break | ||||
|             frags_filenames.append(frag_filename) | ||||
|  | ||||
|         self.report_finish(format_bytes(state['downloaded_bytes']), time.time() - start) | ||||
|  | ||||
|         self.try_rename(tmpfilename, filename) | ||||
|         for frag_file in frags_filenames: | ||||
|             os.remove(frag_file) | ||||
|  | ||||
|         fsize = os.path.getsize(encodeFilename(filename)) | ||||
|         self._hook_progress({ | ||||
|             'downloaded_bytes': fsize, | ||||
|             'total_bytes': fsize, | ||||
|             'filename': filename, | ||||
|             'status': 'finished', | ||||
|         }) | ||||
|  | ||||
|         return True | ||||
							
								
								
									
										44
									
								
								youtube_dl/downloader/hls.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										44
									
								
								youtube_dl/downloader/hls.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,44 @@ | ||||
| import os | ||||
| import subprocess | ||||
|  | ||||
| from .common import FileDownloader | ||||
| from ..utils import ( | ||||
|     encodeFilename, | ||||
| ) | ||||
|  | ||||
|  | ||||
class HlsFD(FileDownloader):
    """Downloader for HLS (m3u8) streams.

    Delegates the actual transfer to an external avconv/ffmpeg process,
    remuxing the MPEG-TS segments into an mp4 container without
    re-encoding ('-c copy' plus the aac_adtstoasc bitstream filter).
    """

    def real_download(self, filename, info_dict):
        """Download info_dict['url'] to *filename*; return True on success."""
        url = info_dict['url']
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        args = ['-y', '-i', url, '-f', 'mp4', '-c', 'copy',
            '-bsf:a', 'aac_adtstoasc', tmpfilename]

        # Probe for an available muxer binary, preferring avconv.
        for program in ['avconv', 'ffmpeg']:
            try:
                # Use a context manager so the devnull handle is closed
                # (the original leaked it on every probe).
                with open(os.path.devnull, 'w') as devnull:
                    subprocess.call([program, '-version'], stdout=devnull, stderr=subprocess.STDOUT)
                break
            except (OSError, IOError):
                pass
        else:
            self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found')
            # BUG FIX: the original fell through here and ran the missing
            # binary anyway, crashing with an unhandled OSError.
            return False
        cmd = [program] + args

        retval = subprocess.call(cmd)
        if retval == 0:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'\r[%s] %s bytes' % (cmd[0], fsize))
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr(u"\n")
            self.report_error(u'ffmpeg exited with code %d' % retval)
            return False
							
								
								
									
										187
									
								
								youtube_dl/downloader/http.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										187
									
								
								youtube_dl/downloader/http.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,187 @@ | ||||
| import os | ||||
| import time | ||||
|  | ||||
| from .common import FileDownloader | ||||
| from ..utils import ( | ||||
|     compat_urllib_request, | ||||
|     compat_urllib_error, | ||||
|     ContentTooShortError, | ||||
|  | ||||
|     encodeFilename, | ||||
|     sanitize_open, | ||||
|     format_bytes, | ||||
| ) | ||||
|  | ||||
|  | ||||
class HttpFD(FileDownloader):
    """Plain HTTP/HTTPS file downloader.

    Supports resuming partial downloads via Range requests, a configurable
    retry count, min/max filesize limits, adaptive read-buffer sizing,
    rate limiting and progress hooks.
    """

    def real_download(self, filename, info_dict):
        """Download info_dict['url'] to *filename*.

        Returns True on success, False on a reported (non-raised) failure.
        Raises ContentTooShortError if fewer bytes arrive than the server
        announced in Content-Length.
        """
        url = info_dict['url']
        tmpfilename = self.temp_name(filename)
        stream = None  # output file object; opened lazily on first data block

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        if 'user_agent' in info_dict:
            headers['Youtubedl-user-agent'] = info_dict['user_agent']
        # basic_request has no Range header; used to re-probe the full
        # resource when a resume attempt fails with 416.
        basic_request = compat_urllib_request.Request(url, None, headers)
        request = compat_urllib_request.Request(url, None, headers)

        # In test mode only fetch the first ~10 KiB.
        if self.params.get('test', False):
            request.add_header('Range', 'bytes=0-10240')

        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', False):
                self.report_resuming_byte(resume_len)
                request.add_header('Range', 'bytes=%d-' % resume_len)
                open_mode = 'ab'  # append to the existing partial file
            else:
                # Resuming disabled: restart from scratch.
                resume_len = 0

        count = 0
        retries = self.params.get('retries', 0)
        # Connection loop: exits via `break` with `data` bound on success,
        # or falls out with count > retries on repeated failure.
        while count <= retries:
            # Establish connection
            try:
                data = self.ydl.urlopen(request)
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = self.ydl.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation to the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            self._hook_progress({
                                'filename': filename,
                                'status': 'finished',
                            })
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            resume_len = 0
                            open_mode = 'wb'
                            break
            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.report_error(u'giving up after %s retries' % retries)
            return False

        # Content-Length of the remaining bytes; add resume_len to get the
        # expected total file size (None if the server did not report one).
        data_len = data.info().get('Content-length', None)
        if data_len is not None:
            data_len = int(data_len) + resume_len
            min_data_len = self.params.get("min_filesize", None)
            max_data_len = self.params.get("max_filesize", None)
            if min_data_len is not None and data_len < min_data_len:
                self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                return False
            if max_data_len is not None and data_len > max_data_len:
                self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                return False

        data_len_str = format_bytes(data_len)
        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()
        # Main transfer loop: read a block, write it, report progress.
        while True:
            # Download and write
            before = time.time()
            data_block = data.read(block_size)
            after = time.time()
            if len(data_block) == 0:
                break  # end of stream
            byte_counter += len(data_block)

            # Open file just in time
            if stream is None:
                try:
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    # sanitize_open may have altered the name; keep the final
                    # filename consistent with the temp name actually used.
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.report_error(u'unable to open for writing: %s' % str(err))
                    return False
            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.to_stderr(u"\n")
                self.report_error(u'unable to write data: %s' % str(err))
                return False
            # Adapt the buffer size to the observed throughput unless the
            # user asked for a fixed buffer.
            if not self.params.get('noresizebuffer', False):
                block_size = self.best_block_size(after - before, len(data_block))

            # Progress message
            speed = self.calc_speed(start, time.time(), byte_counter - resume_len)
            if data_len is None:
                eta = percent = None
            else:
                percent = self.calc_percent(byte_counter, data_len)
                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
            self.report_progress(percent, data_len_str, speed, eta)

            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'status': 'downloading',
                'eta': eta,
                'speed': speed,
            })

            # Apply rate limit
            self.slow_down(start, byte_counter - resume_len)

        if stream is None:
            # Server closed the connection before sending any data.
            self.to_stderr(u"\n")
            self.report_error(u'Did not get any data blocks')
            return False
        stream.close()
        self.report_finish(data_len_str, (time.time() - start))
        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
        })

        return True
							
								
								
									
										40
									
								
								youtube_dl/downloader/mplayer.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										40
									
								
								youtube_dl/downloader/mplayer.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,40 @@ | ||||
| import os | ||||
| import subprocess | ||||
|  | ||||
| from .common import FileDownloader | ||||
| from ..utils import ( | ||||
|     encodeFilename, | ||||
| ) | ||||
|  | ||||
|  | ||||
class MplayerFD(FileDownloader):
    """Downloader for MMS and RTSP streams.

    Delegates the transfer to an external mplayer process using its
    -dumpstream/-dumpfile mode.
    """

    def real_download(self, filename, info_dict):
        """Download info_dict['url'] to *filename*; return True on success."""
        url = info_dict['url']
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url]
        # Check for mplayer first
        try:
            # Use a context manager so the devnull handle is closed
            # (the original leaked it on every probe).
            with open(os.path.devnull, 'w') as devnull:
                subprocess.call(['mplayer', '-h'], stdout=devnull, stderr=subprocess.STDOUT)
        except (OSError, IOError):
            self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0])
            return False

        # Download using mplayer.
        retval = subprocess.call(args)
        if retval == 0:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr(u"\n")
            self.report_error(u'mplayer exited with code %d' % retval)
            return False
							
								
								
									
										195
									
								
								youtube_dl/downloader/rtmp.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										195
									
								
								youtube_dl/downloader/rtmp.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,195 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import os | ||||
| import re | ||||
| import subprocess | ||||
| import sys | ||||
| import time | ||||
|  | ||||
| from .common import FileDownloader | ||||
| from ..utils import ( | ||||
|     encodeFilename, | ||||
|     format_bytes, | ||||
| ) | ||||
|  | ||||
|  | ||||
class RtmpFD(FileDownloader):
    """Real-Time Messaging Protocol downloader.

    Delegates the transfer to the external rtmpdump binary and parses its
    stderr output character by character to report download progress.
    """

    def real_download(self, filename, info_dict):
        """Download the RTMP stream described by *info_dict* to *filename*.

        Returns True on success, False on a reported failure. Supports
        resuming interrupted downloads (rtmpdump exit code 2) unless the
        stream is live or we are in test mode.
        """
        def run_rtmpdump(args):
            # Run one rtmpdump invocation, relaying its stderr progress
            # lines through the progress hooks; returns its exit code.
            start = time.time()
            resume_percent = None
            resume_downloaded_data_len = None
            proc = subprocess.Popen(args, stderr=subprocess.PIPE)
            cursor_in_new_line = True
            proc_stderr_closed = False
            while not proc_stderr_closed:
                # read line from stderr
                line = ''
                while True:
                    char = proc.stderr.read(1)
                    if not char:
                        proc_stderr_closed = True
                        break
                    # rtmpdump updates progress in place with \r; treat both
                    # \r and \n as line terminators.
                    if char in [b'\r', b'\n']:
                        break
                    line += char.decode('ascii', 'replace')
                if not line:
                    # proc_stderr_closed is True
                    continue
                mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                if mobj:
                    downloaded_data_len = int(float(mobj.group(1))*1024)
                    percent = float(mobj.group(2))
                    if not resume_percent:
                        # First progress line of a resumed run: remember the
                        # starting point so speed/ETA cover only this run.
                        resume_percent = percent
                        resume_downloaded_data_len = downloaded_data_len
                    eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
                    speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
                    data_len = None
                    if percent > 0:
                        # Extrapolate the total size from bytes-so-far and percent.
                        data_len = int(downloaded_data_len * 100 / percent)
                    data_len_str = '~' + format_bytes(data_len)
                    self.report_progress(percent, data_len_str, speed, eta)
                    cursor_in_new_line = False
                    self._hook_progress({
                        'downloaded_bytes': downloaded_data_len,
                        'total_bytes': data_len,
                        'tmpfilename': tmpfilename,
                        'filename': filename,
                        'status': 'downloading',
                        'eta': eta,
                        'speed': speed,
                    })
                else:
                    # no percent for live streams
                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                    if mobj:
                        downloaded_data_len = int(float(mobj.group(1))*1024)
                        time_now = time.time()
                        speed = self.calc_speed(start, time_now, downloaded_data_len)
                        self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
                        cursor_in_new_line = False
                        self._hook_progress({
                            'downloaded_bytes': downloaded_data_len,
                            'tmpfilename': tmpfilename,
                            'filename': filename,
                            'status': 'downloading',
                            'speed': speed,
                        })
                    elif self.params.get('verbose', False):
                        if not cursor_in_new_line:
                            self.to_screen('')
                        cursor_in_new_line = True
                        self.to_screen('[rtmpdump] '+line)
            proc.wait()
            if not cursor_in_new_line:
                self.to_screen('')
            return proc.returncode

        url = info_dict['url']
        player_url = info_dict.get('player_url', None)
        page_url = info_dict.get('page_url', None)
        app = info_dict.get('app', None)
        play_path = info_dict.get('play_path', None)
        tc_url = info_dict.get('tc_url', None)
        flash_version = info_dict.get('flash_version', None)
        live = info_dict.get('rtmp_live', False)
        conn = info_dict.get('rtmp_conn', None)

        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)
        test = self.params.get('test', False)

        # Check for rtmpdump first
        try:
            # Context manager so the devnull handle is closed
            # (the original leaked it on every probe).
            with open(os.path.devnull, 'w') as devnull:
                subprocess.call(['rtmpdump', '-h'], stdout=devnull, stderr=subprocess.STDOUT)
        except (OSError, IOError):
            self.report_error('RTMP download detected but "rtmpdump" could not be run')
            return False

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrumpted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = ['rtmpdump', '--verbose', '-r', url, '-o', tmpfilename]
        if player_url is not None:
            basic_args += ['--swfVfy', player_url]
        if page_url is not None:
            basic_args += ['--pageUrl', page_url]
        if app is not None:
            basic_args += ['--app', app]
        if play_path is not None:
            basic_args += ['--playpath', play_path]
        if tc_url is not None:
            # BUG FIX: the original passed `url` here, discarding the
            # tc_url value extracted above.
            basic_args += ['--tcUrl', tc_url]
        if test:
            basic_args += ['--stop', '1']
        if flash_version is not None:
            basic_args += ['--flashVer', flash_version]
        if live:
            basic_args += ['--live']
        if conn:
            basic_args += ['--conn', conn]
        # Append resume flags only when not live and continuedl is enabled.
        args = basic_args + [[], ['--resume', '--skip', '1']][not live and self.params.get('continuedl', False)]

        if sys.platform == 'win32' and sys.version_info < (3, 0):
            # Windows subprocess module does not actually support Unicode
            # on Python 2.x
            # See http://stackoverflow.com/a/9951851/35070
            subprocess_encoding = sys.getfilesystemencoding()
            args = [a.encode(subprocess_encoding, 'ignore') for a in args]
        else:
            subprocess_encoding = None

        if self.params.get('verbose', False):
            if subprocess_encoding:
                str_args = [
                    a.decode(subprocess_encoding) if isinstance(a, bytes) else a
                    for a in args]
            else:
                str_args = args
            try:
                import pipes
                shell_quote = lambda args: ' '.join(map(pipes.quote, str_args))
            except ImportError:
                shell_quote = repr
            self.to_screen('[debug] rtmpdump command line: ' + shell_quote(str_args))

        # rtmpdump exit codes (librtmp convention).
        RD_SUCCESS = 0
        RD_FAILED = 1
        RD_INCOMPLETE = 2
        RD_NO_CONNECT = 3

        retval = run_rtmpdump(args)

        if retval == RD_NO_CONNECT:
            self.report_error('[rtmpdump] Could not connect to RTMP server.')
            return False

        # Keep re-running rtmpdump with --resume while it reports an
        # incomplete/failed transfer and the file keeps growing.
        while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('[rtmpdump] %s bytes' % prevsize)
            time.sleep(5.0) # This seems to be needed
            retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED])
            cursize = os.path.getsize(encodeFilename(tmpfilename))
            if prevsize == cursize and retval == RD_FAILED:
                break
             # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those
            if prevsize == cursize and retval == RD_INCOMPLETE and cursize > 1024:
                self.to_screen('[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
                retval = RD_SUCCESS
                break
        if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('[rtmpdump] %s bytes' % fsize)
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr('\n')
            self.report_error('rtmpdump exited with code %d' % retval)
            return False
| @@ -1,5 +1,6 @@ | ||||
| from .academicearth import AcademicEarthCourseIE | ||||
| from .addanime import AddAnimeIE | ||||
| from .aftonbladet import AftonbladetIE | ||||
| from .anitube import AnitubeIE | ||||
| from .aparat import AparatIE | ||||
| from .appletrailers import AppleTrailersIE | ||||
| @@ -9,26 +10,37 @@ from .arte import ( | ||||
|     ArteTvIE, | ||||
|     ArteTVPlus7IE, | ||||
|     ArteTVCreativeIE, | ||||
|     ArteTVConcertIE, | ||||
|     ArteTVFutureIE, | ||||
|     ArteTVDDCIE, | ||||
| ) | ||||
| from .auengine import AUEngineIE | ||||
| from .bambuser import BambuserIE, BambuserChannelIE | ||||
| from .bandcamp import BandcampIE, BandcampAlbumIE | ||||
| from .bbccouk import BBCCoUkIE | ||||
| from .blinkx import BlinkxIE | ||||
| from .bliptv import BlipTVIE, BlipTVUserIE | ||||
| from .bloomberg import BloombergIE | ||||
| from .br import BRIE | ||||
| from .breakcom import BreakIE | ||||
| from .brightcove import BrightcoveIE | ||||
| from .c56 import C56IE | ||||
| from .canal13cl import Canal13clIE | ||||
| from .canalplus import CanalplusIE | ||||
| from .canalc2 import Canalc2IE | ||||
| from .cbs import CBSIE | ||||
| from .ceskatelevize import CeskaTelevizeIE | ||||
| from .channel9 import Channel9IE | ||||
| from .chilloutzone import ChilloutzoneIE | ||||
| from .cinemassacre import CinemassacreIE | ||||
| from .clipfish import ClipfishIE | ||||
| from .cliphunter import CliphunterIE | ||||
| from .clipsyndicate import ClipsyndicateIE | ||||
| from .cnn import CNNIE | ||||
| from .cmt import CMTIE | ||||
| from .cnn import ( | ||||
|     CNNIE, | ||||
|     CNNBlogsIE, | ||||
| ) | ||||
| from .collegehumor import CollegeHumorIE | ||||
| from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE | ||||
| from .condenast import CondeNastIE | ||||
| @@ -42,89 +54,135 @@ from .dailymotion import ( | ||||
|     DailymotionUserIE, | ||||
| ) | ||||
| from .daum import DaumIE | ||||
| from .depositfiles import DepositFilesIE | ||||
| from .dotsub import DotsubIE | ||||
| from .dreisat import DreiSatIE | ||||
| from .defense import DefenseGouvFrIE | ||||
| from .discovery import DiscoveryIE | ||||
| from .dropbox import DropboxIE | ||||
| from .ebaumsworld import EbaumsWorldIE | ||||
| from .ehow import EHowIE | ||||
| from .eighttracks import EightTracksIE | ||||
| from .eitb import EitbIE | ||||
| from .elpais import ElPaisIE | ||||
| from .escapist import EscapistIE | ||||
| from .everyonesmixtape import EveryonesMixtapeIE | ||||
| from .exfm import ExfmIE | ||||
| from .extremetube import ExtremeTubeIE | ||||
| from .facebook import FacebookIE | ||||
| from .faz import FazIE | ||||
| from .firstpost import FirstpostIE | ||||
| from .firsttv import FirstTVIE | ||||
| from .fktv import ( | ||||
|     FKTVIE, | ||||
|     FKTVPosteckeIE, | ||||
| ) | ||||
| from .flickr import FlickrIE | ||||
| from .fourtube import FourTubeIE | ||||
| from .franceinter import FranceInterIE | ||||
| from .francetv import ( | ||||
|     PluzzIE, | ||||
|     FranceTvInfoIE, | ||||
|     FranceTVIE, | ||||
|     GenerationQuoiIE | ||||
|     GenerationQuoiIE, | ||||
|     CultureboxIE, | ||||
| ) | ||||
| from .freesound import FreesoundIE | ||||
| from .freespeech import FreespeechIE | ||||
| from .funnyordie import FunnyOrDieIE | ||||
| from .gamekings import GamekingsIE | ||||
| from .gamespot import GameSpotIE | ||||
| from .gametrailers import GametrailersIE | ||||
| from .gdcvault import GDCVaultIE | ||||
| from .generic import GenericIE | ||||
| from .googleplus import GooglePlusIE | ||||
| from .googlesearch import GoogleSearchIE | ||||
| from .hark import HarkIE | ||||
| from .helsinki import HelsinkiIE | ||||
| from .hotnewhiphop import HotNewHipHopIE | ||||
| from .howcast import HowcastIE | ||||
| from .huffpost import HuffPostIE | ||||
| from .hypem import HypemIE | ||||
| from .ign import IGNIE, OneUPIE | ||||
| from .imdb import ImdbIE | ||||
| from .imdb import ( | ||||
|     ImdbIE, | ||||
|     ImdbListIE | ||||
| ) | ||||
| from .ina import InaIE | ||||
| from .infoq import InfoQIE | ||||
| from .instagram import InstagramIE | ||||
| from .internetvideoarchive import InternetVideoArchiveIE | ||||
| from .iprima import IPrimaIE | ||||
| from .ivi import ( | ||||
|     IviIE, | ||||
|     IviCompilationIE | ||||
| ) | ||||
| from .jadorecettepub import JadoreCettePubIE | ||||
| from .jeuxvideo import JeuxVideoIE | ||||
| from .jukebox import JukeboxIE | ||||
| from .justintv import JustinTVIE | ||||
| from .jpopsukitv import JpopsukiIE | ||||
| from .kankan import KankanIE | ||||
| from .keezmovies import KeezMoviesIE | ||||
| from .khanacademy import KhanAcademyIE | ||||
| from .kickstarter import KickStarterIE | ||||
| from .keek import KeekIE | ||||
| from .kontrtube import KontrTubeIE | ||||
| from .la7 import LA7IE | ||||
| from .lifenews import LifeNewsIE | ||||
| from .liveleak import LiveLeakIE | ||||
| from .livestream import LivestreamIE, LivestreamOriginalIE | ||||
| from .lynda import ( | ||||
|     LyndaIE, | ||||
|     LyndaCourseIE | ||||
| ) | ||||
| from .m6 import M6IE | ||||
| from .macgamestore import MacGameStoreIE | ||||
| from .mailru import MailRuIE | ||||
| from .malemotion import MalemotionIE | ||||
| from .mdr import MDRIE | ||||
| from .metacafe import MetacafeIE | ||||
| from .metacritic import MetacriticIE | ||||
| from .mit import TechTVMITIE, MITIE | ||||
| from .mit import TechTVMITIE, MITIE, OCWMITIE | ||||
| from .mixcloud import MixcloudIE | ||||
| from .mpora import MporaIE | ||||
| from .mofosex import MofosexIE | ||||
| from .mtv import MTVIE | ||||
| from .mooshare import MooshareIE | ||||
| from .mtv import ( | ||||
|     MTVIE, | ||||
|     MTVIggyIE, | ||||
| ) | ||||
| from .muzu import MuzuTVIE | ||||
| from .myspace import MySpaceIE | ||||
| from .myspass import MySpassIE | ||||
| from .myvideo import MyVideoIE | ||||
| from .naver import NaverIE | ||||
| from .nba import NBAIE | ||||
| from .nbc import NBCNewsIE | ||||
| from .nbc import ( | ||||
|     NBCIE, | ||||
|     NBCNewsIE, | ||||
| ) | ||||
| from .ndr import NDRIE | ||||
| from .ndtv import NDTVIE | ||||
| from .newgrounds import NewgroundsIE | ||||
| from .nfb import NFBIE | ||||
| from .nhl import NHLIE, NHLVideocenterIE | ||||
| from .niconico import NiconicoIE | ||||
| from .ninegag import NineGagIE | ||||
| from .normalboots import NormalbootsIE | ||||
| from .novamov import NovaMovIE | ||||
| from .nowness import NownessIE | ||||
| from .nowvideo import NowVideoIE | ||||
| from .ooyala import OoyalaIE | ||||
| from .orf import ORFIE | ||||
| from .parliamentliveuk import ParliamentLiveUKIE | ||||
| from .pbs import PBSIE | ||||
| from .photobucket import PhotobucketIE | ||||
| from .playvid import PlayvidIE | ||||
| from .podomatic import PodomaticIE | ||||
| from .pornhd import PornHdIE | ||||
| from .pornhub import PornHubIE | ||||
| from .pornotube import PornotubeIE | ||||
| from .prosiebensat1 import ProSiebenSat1IE | ||||
| from .pyvideo import PyvideoIE | ||||
| from .radiofrance import RadioFranceIE | ||||
| from .rbmaradio import RBMARadioIE | ||||
| @@ -134,7 +192,15 @@ from .ro220 import Ro220IE | ||||
| from .rottentomatoes import RottenTomatoesIE | ||||
| from .roxwel import RoxwelIE | ||||
| from .rtlnow import RTLnowIE | ||||
| from .rutube import RutubeIE | ||||
| from .rutube import ( | ||||
|     RutubeIE, | ||||
|     RutubeChannelIE, | ||||
|     RutubeMovieIE, | ||||
|     RutubePersonIE, | ||||
| ) | ||||
| from .rutv import RUTVIE | ||||
| from .savefrom import SaveFromIE | ||||
| from .servingsys import ServingSysIE | ||||
| from .sina import SinaIE | ||||
| from .slashdot import SlashdotIE | ||||
| from .slideshare import SlideshareIE | ||||
| @@ -153,33 +219,46 @@ from .southparkstudios import ( | ||||
| from .space import SpaceIE | ||||
| from .spankwire import SpankwireIE | ||||
| from .spiegel import SpiegelIE | ||||
| from .spike import SpikeIE | ||||
| from .stanfordoc import StanfordOpenClassroomIE | ||||
| from .statigram import StatigramIE | ||||
| from .steam import SteamIE | ||||
| from .streamcloud import StreamcloudIE | ||||
| from .streamcz import StreamCZIE | ||||
| from .syfy import SyfyIE | ||||
| from .sztvhu import SztvHuIE | ||||
| from .teamcoco import TeamcocoIE | ||||
| from .techtalks import TechTalksIE | ||||
| from .ted import TEDIE | ||||
| from .testurl import TestURLIE | ||||
| from .tf1 import TF1IE | ||||
| from .theplatform import ThePlatformIE | ||||
| from .thisav import ThisAVIE | ||||
| from .tinypic import TinyPicIE | ||||
| from .toutv import TouTvIE | ||||
| from .traileraddict import TrailerAddictIE | ||||
| from .trilulilu import TriluliluIE | ||||
| from .trutube import TruTubeIE | ||||
| from .tube8 import Tube8IE | ||||
| from .tudou import TudouIE | ||||
| from .tumblr import TumblrIE | ||||
| from .tutv import TutvIE | ||||
| from .tvigle import TvigleIE | ||||
| from .tvp import TvpIE | ||||
| from .udemy import ( | ||||
|     UdemyIE, | ||||
|     UdemyCourseIE | ||||
| ) | ||||
| from .unistra import UnistraIE | ||||
| from .ustream import UstreamIE, UstreamChannelIE | ||||
| from .vbox7 import Vbox7IE | ||||
| from .veehd import VeeHDIE | ||||
| from .veoh import VeohIE | ||||
| from .vesti import VestiIE | ||||
| from .vevo import VevoIE | ||||
| from .vice import ViceIE | ||||
| from .viddler import ViddlerIE | ||||
| from .videobam import VideoBamIE | ||||
| from .videodetective import VideoDetectiveIE | ||||
| from .videofyme import VideofyMeIE | ||||
| from .videopremium import VideoPremiumIE | ||||
| @@ -189,12 +268,14 @@ from .vimeo import ( | ||||
|     VimeoUserIE, | ||||
|     VimeoAlbumIE, | ||||
|     VimeoGroupsIE, | ||||
|     VimeoReviewIE, | ||||
| ) | ||||
| from .vine import VineIE | ||||
| from .viki import VikiIE | ||||
| from .vk import VKIE | ||||
| from .vube import VubeIE | ||||
| from .wat import WatIE | ||||
| from .websurg import WeBSurgIE | ||||
| from .wdr import WDRIE | ||||
| from .weibo import WeiboIE | ||||
| from .wimp import WimpIE | ||||
| from .wistia import WistiaIE | ||||
| @@ -213,19 +294,20 @@ from .youku import YoukuIE | ||||
| from .youporn import YouPornIE | ||||
| from .youtube import ( | ||||
|     YoutubeIE, | ||||
|     YoutubePlaylistIE, | ||||
|     YoutubeSearchIE, | ||||
|     YoutubeSearchDateIE, | ||||
|     YoutubeUserIE, | ||||
|     YoutubeChannelIE, | ||||
|     YoutubeShowIE, | ||||
|     YoutubeSubscriptionsIE, | ||||
|     YoutubeRecommendedIE, | ||||
|     YoutubeTruncatedURLIE, | ||||
|     YoutubeWatchLaterIE, | ||||
|     YoutubeFavouritesIE, | ||||
|     YoutubeHistoryIE, | ||||
|     YoutubePlaylistIE, | ||||
|     YoutubeRecommendedIE, | ||||
|     YoutubeSearchDateIE, | ||||
|     YoutubeSearchIE, | ||||
|     YoutubeSearchURLIE, | ||||
|     YoutubeShowIE, | ||||
|     YoutubeSubscriptionsIE, | ||||
|     YoutubeTopListIE, | ||||
|     YoutubeTruncatedURLIE, | ||||
|     YoutubeUserIE, | ||||
|     YoutubeWatchLaterIE, | ||||
| ) | ||||
| from .zdf import ZDFIE | ||||
|  | ||||
|   | ||||
| @@ -1,11 +1,12 @@ | ||||
| from __future__ import unicode_literals | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
| class AcademicEarthCourseIE(InfoExtractor): | ||||
|     _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/(?:courses|playlists)/(?P<id>[^?#/]+)' | ||||
|     IE_NAME = u'AcademicEarth:Course' | ||||
|     _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/playlists/(?P<id>[^?#/]+)' | ||||
|     IE_NAME = 'AcademicEarth:Course' | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         m = re.match(self._VALID_URL, url) | ||||
| @@ -13,12 +14,12 @@ class AcademicEarthCourseIE(InfoExtractor): | ||||
|  | ||||
|         webpage = self._download_webpage(url, playlist_id) | ||||
|         title = self._html_search_regex( | ||||
|             r'<h1 class="playlist-name">(.*?)</h1>', webpage, u'title') | ||||
|             r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, u'title') | ||||
|         description = self._html_search_regex( | ||||
|             r'<p class="excerpt">(.*?)</p>', | ||||
|             r'<p class="excerpt"[^>]*?>(.*?)</p>', | ||||
|             webpage, u'description', fatal=False) | ||||
|         urls = re.findall( | ||||
|             r'<h3 class="lecture-title"><a target="_blank" href="([^"]+)">', | ||||
|             r'<li class="lecture-preview">\s*?<a target="_blank" href="([^"]+)">', | ||||
|             webpage) | ||||
|         entries = [self.url_result(u) for u in urls] | ||||
|  | ||||
|   | ||||
							
								
								
									
										69
									
								
								youtube_dl/extractor/aftonbladet.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										69
									
								
								youtube_dl/extractor/aftonbladet.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,69 @@ | ||||
| # encoding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import datetime | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
| class AftonbladetIE(InfoExtractor): | ||||
|     _VALID_URL = r'^http://tv\.aftonbladet\.se/webbtv.+?(?P<video_id>article[0-9]+)\.ab(?:$|[?#])' | ||||
|     _TEST = { | ||||
|         'url': 'http://tv.aftonbladet.se/webbtv/nyheter/vetenskap/rymden/article36015.ab', | ||||
|         'info_dict': { | ||||
|             'id': 'article36015', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Vulkanutbrott i rymden - nu släpper NASA bilderna', | ||||
|             'description': 'Jupiters måne mest aktiv av alla himlakroppar', | ||||
|             'upload_date': '20140306', | ||||
|         }, | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.search(self._VALID_URL, url) | ||||
|  | ||||
|         video_id = mobj.group('video_id') | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|  | ||||
|         # find internal video meta data | ||||
|         META_URL = 'http://aftonbladet-play.drlib.aptoma.no/video/%s.json' | ||||
|         internal_meta_id = self._html_search_regex( | ||||
|             r'data-aptomaId="([\w\d]+)"', webpage, 'internal_meta_id') | ||||
|         internal_meta_url = META_URL % internal_meta_id | ||||
|         internal_meta_json = self._download_json( | ||||
|             internal_meta_url, video_id, 'Downloading video meta data') | ||||
|  | ||||
|         # find internal video formats | ||||
|         FORMATS_URL = 'http://aftonbladet-play.videodata.drvideo.aptoma.no/actions/video/?id=%s' | ||||
|         internal_video_id = internal_meta_json['videoId'] | ||||
|         internal_formats_url = FORMATS_URL % internal_video_id | ||||
|         internal_formats_json = self._download_json( | ||||
|             internal_formats_url, video_id, 'Downloading video formats') | ||||
|  | ||||
|         formats = [] | ||||
|         for fmt in internal_formats_json['formats']['http']['pseudostreaming']['mp4']: | ||||
|             p = fmt['paths'][0] | ||||
|             formats.append({ | ||||
|                 'url': 'http://%s:%d/%s/%s' % (p['address'], p['port'], p['path'], p['filename']), | ||||
|                 'ext': 'mp4', | ||||
|                 'width': fmt['width'], | ||||
|                 'height': fmt['height'], | ||||
|                 'tbr': fmt['bitrate'], | ||||
|                 'protocol': 'http', | ||||
|             }) | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         timestamp = datetime.datetime.fromtimestamp(internal_meta_json['timePublished']) | ||||
|         upload_date = timestamp.strftime('%Y%m%d') | ||||
|  | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'title': internal_meta_json['title'], | ||||
|             'formats': formats, | ||||
|             'thumbnail': internal_meta_json['imageUrl'], | ||||
|             'description': internal_meta_json['shortPreamble'], | ||||
|             'upload_date': upload_date, | ||||
|             'duration': internal_meta_json['duration'], | ||||
|             'view_count': internal_meta_json['views'], | ||||
|         } | ||||
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
|  | ||||
| @@ -11,46 +13,46 @@ from ..utils import ( | ||||
| class AppleTrailersIE(InfoExtractor): | ||||
|     _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/trailers/(?P<company>[^/]+)/(?P<movie>[^/]+)' | ||||
|     _TEST = { | ||||
|         u"url": u"http://trailers.apple.com/trailers/wb/manofsteel/", | ||||
|         u"playlist": [ | ||||
|         "url": "http://trailers.apple.com/trailers/wb/manofsteel/", | ||||
|         "playlist": [ | ||||
|             { | ||||
|                 u"file": u"manofsteel-trailer4.mov", | ||||
|                 u"md5": u"d97a8e575432dbcb81b7c3acb741f8a8", | ||||
|                 u"info_dict": { | ||||
|                     u"duration": 111, | ||||
|                     u"title": u"Trailer 4", | ||||
|                     u"upload_date": u"20130523", | ||||
|                     u"uploader_id": u"wb", | ||||
|                 "file": "manofsteel-trailer4.mov", | ||||
|                 "md5": "d97a8e575432dbcb81b7c3acb741f8a8", | ||||
|                 "info_dict": { | ||||
|                     "duration": 111, | ||||
|                     "title": "Trailer 4", | ||||
|                     "upload_date": "20130523", | ||||
|                     "uploader_id": "wb", | ||||
|                 }, | ||||
|             }, | ||||
|             { | ||||
|                 u"file": u"manofsteel-trailer3.mov", | ||||
|                 u"md5": u"b8017b7131b721fb4e8d6f49e1df908c", | ||||
|                 u"info_dict": { | ||||
|                     u"duration": 182, | ||||
|                     u"title": u"Trailer 3", | ||||
|                     u"upload_date": u"20130417", | ||||
|                     u"uploader_id": u"wb", | ||||
|                 "file": "manofsteel-trailer3.mov", | ||||
|                 "md5": "b8017b7131b721fb4e8d6f49e1df908c", | ||||
|                 "info_dict": { | ||||
|                     "duration": 182, | ||||
|                     "title": "Trailer 3", | ||||
|                     "upload_date": "20130417", | ||||
|                     "uploader_id": "wb", | ||||
|                 }, | ||||
|             }, | ||||
|             { | ||||
|                 u"file": u"manofsteel-trailer.mov", | ||||
|                 u"md5": u"d0f1e1150989b9924679b441f3404d48", | ||||
|                 u"info_dict": { | ||||
|                     u"duration": 148, | ||||
|                     u"title": u"Trailer", | ||||
|                     u"upload_date": u"20121212", | ||||
|                     u"uploader_id": u"wb", | ||||
|                 "file": "manofsteel-trailer.mov", | ||||
|                 "md5": "d0f1e1150989b9924679b441f3404d48", | ||||
|                 "info_dict": { | ||||
|                     "duration": 148, | ||||
|                     "title": "Trailer", | ||||
|                     "upload_date": "20121212", | ||||
|                     "uploader_id": "wb", | ||||
|                 }, | ||||
|             }, | ||||
|             { | ||||
|                 u"file": u"manofsteel-teaser.mov", | ||||
|                 u"md5": u"5fe08795b943eb2e757fa95cb6def1cb", | ||||
|                 u"info_dict": { | ||||
|                     u"duration": 93, | ||||
|                     u"title": u"Teaser", | ||||
|                     u"upload_date": u"20120721", | ||||
|                     u"uploader_id": u"wb", | ||||
|                 "file": "manofsteel-teaser.mov", | ||||
|                 "md5": "5fe08795b943eb2e757fa95cb6def1cb", | ||||
|                 "info_dict": { | ||||
|                     "duration": 93, | ||||
|                     "title": "Teaser", | ||||
|                     "upload_date": "20120721", | ||||
|                     "uploader_id": "wb", | ||||
|                 }, | ||||
|             } | ||||
|         ] | ||||
| @@ -110,7 +112,8 @@ class AppleTrailersIE(InfoExtractor): | ||||
|                     'width': format['width'], | ||||
|                     'height': int(format['height']), | ||||
|                 }) | ||||
|             formats = sorted(formats, key=lambda f: (f['height'], f['width'])) | ||||
|  | ||||
|             self._sort_formats(formats) | ||||
|  | ||||
|             playlist.append({ | ||||
|                 '_type': 'video', | ||||
|   | ||||
| @@ -1,9 +1,10 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import json | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     determine_ext, | ||||
|     unified_strdate, | ||||
| ) | ||||
|  | ||||
| @@ -13,23 +14,22 @@ class ArchiveOrgIE(InfoExtractor): | ||||
|     IE_DESC = 'archive.org videos' | ||||
|     _VALID_URL = r'(?:https?://)?(?:www\.)?archive\.org/details/(?P<id>[^?/]+)(?:[?].*)?$' | ||||
|     _TEST = { | ||||
|         u"url": u"http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect", | ||||
|         u'file': u'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv', | ||||
|         u'md5': u'8af1d4cf447933ed3c7f4871162602db', | ||||
|         u'info_dict': { | ||||
|             u"title": u"1968 Demo - FJCC Conference Presentation Reel #1", | ||||
|             u"description": u"Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also <a href=\"http://dougengelbart.org/firsts/dougs-1968-demo.html\" rel=\"nofollow\">Doug's 1968 Demo page</a> for more background, highlights, links, and the detailed paper published in this conference proceedings. Filmed on 3 reels: Reel 1 | <a href=\"http://www.archive.org/details/XD300-24_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 2</a> | <a href=\"http://www.archive.org/details/XD300-25_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 3</a>", | ||||
|             u"upload_date": u"19681210", | ||||
|             u"uploader": u"SRI International" | ||||
|         "url": "http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect", | ||||
|         'file': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv', | ||||
|         'md5': '8af1d4cf447933ed3c7f4871162602db', | ||||
|         'info_dict': { | ||||
|             "title": "1968 Demo - FJCC Conference Presentation Reel #1", | ||||
|             "description": "Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also <a href=\"http://dougengelbart.org/firsts/dougs-1968-demo.html\" rel=\"nofollow\">Doug's 1968 Demo page</a> for more background, highlights, links, and the detailed paper published in this conference proceedings. Filmed on 3 reels: Reel 1 | <a href=\"http://www.archive.org/details/XD300-24_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 2</a> | <a href=\"http://www.archive.org/details/XD300-25_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 3</a>", | ||||
|             "upload_date": "19681210", | ||||
|             "uploader": "SRI International" | ||||
|         } | ||||
|     } | ||||
|  | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         video_id = mobj.group('id') | ||||
|  | ||||
|         json_url = url + (u'?' if u'?' in url else '&') + u'output=json' | ||||
|         json_url = url + ('?' if '?' in url else '&') + 'output=json' | ||||
|         json_data = self._download_webpage(json_url, video_id) | ||||
|         data = json.loads(json_data) | ||||
|  | ||||
| @@ -38,16 +38,16 @@ class ArchiveOrgIE(InfoExtractor): | ||||
|         uploader = data['metadata']['creator'][0] | ||||
|         upload_date = unified_strdate(data['metadata']['date'][0]) | ||||
|  | ||||
|         formats = [{ | ||||
|         formats = [ | ||||
|             { | ||||
|                 'format': fdata['format'], | ||||
|                 'url': 'http://' + data['server'] + data['dir'] + fn, | ||||
|                 'file_size': int(fdata['size']), | ||||
|             } | ||||
|             for fn,fdata in data['files'].items() | ||||
|             for fn, fdata in data['files'].items() | ||||
|             if 'Video' in fdata['format']] | ||||
|         formats.sort(key=lambda fdata: fdata['file_size']) | ||||
|         for f in formats: | ||||
|             f['ext'] = determine_ext(f['url']) | ||||
|  | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return { | ||||
|             '_type': 'video', | ||||
|   | ||||
| @@ -1,22 +1,28 @@ | ||||
| # coding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     determine_ext, | ||||
|     ExtractorError, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class ARDIE(InfoExtractor): | ||||
|     _VALID_URL = r'^(?:https?://)?(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?' | ||||
|     _TITLE = r'<h1(?: class="boxTopHeadline")?>(?P<title>.*)</h1>' | ||||
|     _MEDIA_STREAM = r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)' | ||||
|     _VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?' | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.ardmediathek.de/das-erste/tagesschau-in-100-sek?documentId=14077640', | ||||
|         u'file': u'14077640.mp4', | ||||
|         u'md5': u'6ca8824255460c787376353f9e20bbd8', | ||||
|         u'info_dict': { | ||||
|             u"title": u"11.04.2013 09:23 Uhr - Tagesschau in 100 Sekunden" | ||||
|         'url': 'http://www.ardmediathek.de/das-erste/guenther-jauch/edward-snowden-im-interview-held-oder-verraeter?documentId=19288786', | ||||
|         'file': '19288786.mp4', | ||||
|         'md5': '515bf47ce209fb3f5a61b7aad364634c', | ||||
|         'info_dict': { | ||||
|             'title': 'Edward Snowden im Interview - Held oder Verräter?', | ||||
|             'description': 'Edward Snowden hat alles aufs Spiel gesetzt, um die weltweite \xdcberwachung durch die Geheimdienste zu enttarnen. Nun stellt sich der ehemalige NSA-Mitarbeiter erstmals weltweit in einem TV-Interview den Fragen eines NDR-Journalisten. Die Sendung vom Sonntagabend.', | ||||
|             'thumbnail': 'http://www.ardmediathek.de/ard/servlet/contentblob/19/28/87/90/19288790/bild/2250037', | ||||
|         }, | ||||
|         u'skip': u'Requires rtmpdump' | ||||
|         'skip': 'Blocked outside of Germany', | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
| @@ -29,26 +35,49 @@ class ARDIE(InfoExtractor): | ||||
|         else: | ||||
|             video_id = m.group('video_id') | ||||
|  | ||||
|         # determine title and media streams from webpage | ||||
|         html = self._download_webpage(url, video_id) | ||||
|         title = re.search(self._TITLE, html).group('title') | ||||
|         streams = [mo.groupdict() for mo in re.finditer(self._MEDIA_STREAM, html)] | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|  | ||||
|         title = self._html_search_regex( | ||||
|             r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>', webpage, 'title') | ||||
|         description = self._html_search_meta( | ||||
|             'dcterms.abstract', webpage, 'description') | ||||
|         thumbnail = self._og_search_thumbnail(webpage) | ||||
|  | ||||
|         streams = [ | ||||
|             mo.groupdict() | ||||
|             for mo in re.finditer( | ||||
|                 r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)', webpage)] | ||||
|         if not streams: | ||||
|             assert '"fsk"' in html | ||||
|             raise ExtractorError(u'This video is only available after 8:00 pm') | ||||
|             if '"fsk"' in webpage: | ||||
|                 raise ExtractorError('This video is only available after 20:00') | ||||
|  | ||||
|         # choose default media type and highest quality for now | ||||
|         stream = max([s for s in streams if int(s["media_type"]) == 0], | ||||
|                      key=lambda s: int(s["quality"])) | ||||
|         formats = [] | ||||
|         for s in streams: | ||||
|             format = { | ||||
|                 'quality': int(s['quality']), | ||||
|             } | ||||
|             if s.get('rtmp_url'): | ||||
|                 format['protocol'] = 'rtmp' | ||||
|                 format['url'] = s['rtmp_url'] | ||||
|                 format['playpath'] = s['video_url'] | ||||
|             else: | ||||
|                 format['url'] = s['video_url'] | ||||
|  | ||||
|         # there's two possibilities: RTMP stream or HTTP download | ||||
|         info = {'id': video_id, 'title': title, 'ext': 'mp4'} | ||||
|         if stream['rtmp_url']: | ||||
|             self.to_screen(u'RTMP download detected') | ||||
|             assert stream['video_url'].startswith('mp4:') | ||||
|             info["url"] = stream["rtmp_url"] | ||||
|             info["play_path"] = stream['video_url'] | ||||
|         else: | ||||
|             assert stream["video_url"].endswith('.mp4') | ||||
|             info["url"] = stream["video_url"] | ||||
|         return [info] | ||||
|             quality_name = self._search_regex( | ||||
|                 r'[,.]([a-zA-Z0-9_-]+),?\.mp4', format['url'], | ||||
|                 'quality name', default='NA') | ||||
|             format['format_id'] = '%s-%s-%s-%s' % ( | ||||
|                 determine_ext(format['url']), quality_name, s['media_type'], | ||||
|                 s['quality']) | ||||
|  | ||||
|             formats.append(format) | ||||
|  | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'title': title, | ||||
|             'description': description, | ||||
|             'formats': formats, | ||||
|             'thumbnail': thumbnail, | ||||
|         } | ||||
|   | ||||
| @@ -1,4 +1,6 @@ | ||||
| # encoding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
|  | ||||
| @@ -22,7 +24,7 @@ class ArteTvIE(InfoExtractor): | ||||
|     _LIVEWEB_URL = r'(?:http://)?liveweb\.arte\.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)' | ||||
|     _LIVE_URL = r'index-[0-9]+\.html$' | ||||
|  | ||||
|     IE_NAME = u'arte.tv' | ||||
|     IE_NAME = 'arte.tv' | ||||
|  | ||||
|     @classmethod | ||||
|     def suitable(cls, url): | ||||
| @@ -37,7 +39,7 @@ class ArteTvIE(InfoExtractor): | ||||
|     #         r'src="(.*?/videothek_js.*?\.js)', | ||||
|     #         0, | ||||
|     #         [ | ||||
|     #             (1, 'url', u'Invalid URL: %s' % url) | ||||
|     #             (1, 'url', 'Invalid URL: %s' % url) | ||||
|     #         ] | ||||
|     #     ) | ||||
|     #     http_host = url.split('/')[2] | ||||
| @@ -49,12 +51,12 @@ class ArteTvIE(InfoExtractor): | ||||
|     #             '(rtmp://.*?)\'', | ||||
|     #         re.DOTALL, | ||||
|     #         [ | ||||
|     #             (1, 'path',   u'could not extract video path: %s' % url), | ||||
|     #             (2, 'player', u'could not extract video player: %s' % url), | ||||
|     #             (3, 'url',    u'could not extract video url: %s' % url) | ||||
|     #             (1, 'path',   'could not extract video path: %s' % url), | ||||
|     #             (2, 'player', 'could not extract video player: %s' % url), | ||||
|     #             (3, 'url',    'could not extract video url: %s' % url) | ||||
|     #         ] | ||||
|     #     ) | ||||
|     #     video_url = u'%s/%s' % (info.get('url'), info.get('path')) | ||||
|     #     video_url = '%s/%s' % (info.get('url'), info.get('path')) | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VIDEOS_URL, url) | ||||
| @@ -70,18 +72,22 @@ class ArteTvIE(InfoExtractor): | ||||
|             return self._extract_liveweb(url, name, lang) | ||||
|  | ||||
|         if re.search(self._LIVE_URL, url) is not None: | ||||
|             raise ExtractorError(u'Arte live streams are not yet supported, sorry') | ||||
|             raise ExtractorError('Arte live streams are not yet supported, sorry') | ||||
|             # self.extractLiveStream(url) | ||||
|             # return | ||||
|  | ||||
|         raise ExtractorError('No video found') | ||||
|  | ||||
|     def _extract_video(self, url, video_id, lang): | ||||
|         """Extract from videos.arte.tv""" | ||||
|         ref_xml_url = url.replace('/videos/', '/do_delegate/videos/') | ||||
|         ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml') | ||||
|         ref_xml_doc = self._download_xml(ref_xml_url, video_id, note=u'Downloading metadata') | ||||
|         ref_xml_doc = self._download_xml( | ||||
|             ref_xml_url, video_id, note='Downloading metadata') | ||||
|         config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang) | ||||
|         config_xml_url = config_node.attrib['ref'] | ||||
|         config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration') | ||||
|         config_xml = self._download_webpage( | ||||
|             config_xml_url, video_id, note='Downloading configuration') | ||||
|  | ||||
|         video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml)) | ||||
|         def _key(m): | ||||
| @@ -107,9 +113,9 @@ class ArteTvIE(InfoExtractor): | ||||
|     def _extract_liveweb(self, url, name, lang): | ||||
|         """Extract form http://liveweb.arte.tv/""" | ||||
|         webpage = self._download_webpage(url, name) | ||||
|         video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, u'event id') | ||||
|         video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, 'event id') | ||||
|         config_doc = self._download_xml('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id, | ||||
|                                             video_id, u'Downloading information') | ||||
|                                             video_id, 'Downloading information') | ||||
|         event_doc = config_doc.find('event') | ||||
|         url_node = event_doc.find('video').find('urlHd') | ||||
|         if url_node is None: | ||||
| @@ -124,8 +130,8 @@ class ArteTvIE(InfoExtractor): | ||||
|  | ||||
|  | ||||
| class ArteTVPlus7IE(InfoExtractor): | ||||
|     IE_NAME = u'arte.tv:+7' | ||||
|     _VALID_URL = r'https?://www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?' | ||||
|     IE_NAME = 'arte.tv:+7' | ||||
|     _VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?' | ||||
|  | ||||
|     @classmethod | ||||
|     def _extract_url_info(cls, url): | ||||
| @@ -196,6 +202,8 @@ class ArteTVPlus7IE(InfoExtractor): | ||||
|                     re.match(r'VO-ST(F|A)', f.get('versionCode', '')) is None, | ||||
|                     # The version with sourds/mal subtitles has also lower relevance | ||||
|                     re.match(r'VO?(F|A)-STM\1', f.get('versionCode', '')) is None, | ||||
|                     # Prefer http downloads over m3u8 | ||||
|                     0 if f['url'].endswith('m3u8') else 1, | ||||
|                 ) | ||||
|         formats = sorted(formats, key=sort_key) | ||||
|         def _format(format_info): | ||||
| @@ -207,7 +215,7 @@ class ArteTVPlus7IE(InfoExtractor): | ||||
|             if bitrate is not None: | ||||
|                 quality += '-%d' % bitrate | ||||
|             if format_info.get('versionCode') is not None: | ||||
|                 format_id = u'%s-%s' % (quality, format_info['versionCode']) | ||||
|                 format_id = '%s-%s' % (quality, format_info['versionCode']) | ||||
|             else: | ||||
|                 format_id = quality | ||||
|             info = { | ||||
| @@ -216,7 +224,7 @@ class ArteTVPlus7IE(InfoExtractor): | ||||
|                 'width': format_info.get('width'), | ||||
|                 'height': height, | ||||
|             } | ||||
|             if format_info['mediaType'] == u'rtmp': | ||||
|             if format_info['mediaType'] == 'rtmp': | ||||
|                 info['url'] = format_info['streamer'] | ||||
|                 info['play_path'] = 'mp4:' + format_info['url'] | ||||
|                 info['ext'] = 'flv' | ||||
| @@ -231,27 +239,29 @@ class ArteTVPlus7IE(InfoExtractor): | ||||
|  | ||||
| # It also uses the arte_vp_url url from the webpage to extract the information | ||||
| class ArteTVCreativeIE(ArteTVPlus7IE): | ||||
|     IE_NAME = u'arte.tv:creative' | ||||
|     IE_NAME = 'arte.tv:creative' | ||||
|     _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/magazine?/(?P<id>.+)' | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design', | ||||
|         u'file': u'050489-002.mp4', | ||||
|         u'info_dict': { | ||||
|             u'title': u'Agentur Amateur / Agence Amateur #2 : Corporate Design', | ||||
|         'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design', | ||||
|         'info_dict': { | ||||
|             'id': '050489-002', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Agentur Amateur / Agence Amateur #2 : Corporate Design', | ||||
|         }, | ||||
|     } | ||||
|  | ||||
|  | ||||
| class ArteTVFutureIE(ArteTVPlus7IE): | ||||
|     IE_NAME = u'arte.tv:future' | ||||
|     IE_NAME = 'arte.tv:future' | ||||
|     _VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)' | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': u'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081', | ||||
|         u'file': u'050940-003.mp4', | ||||
|         u'info_dict': { | ||||
|             u'title': u'Les champignons au secours de la planète', | ||||
|         'url': 'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081', | ||||
|         'info_dict': { | ||||
|             'id': '050940-003', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Les champignons au secours de la planète', | ||||
|         }, | ||||
|     } | ||||
|  | ||||
| @@ -263,8 +273,8 @@ class ArteTVFutureIE(ArteTVPlus7IE): | ||||
|  | ||||
|  | ||||
| class ArteTVDDCIE(ArteTVPlus7IE): | ||||
|     IE_NAME = u'arte.tv:ddc' | ||||
|     _VALID_URL = r'http?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)' | ||||
|     IE_NAME = 'arte.tv:ddc' | ||||
|     _VALID_URL = r'https?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)' | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         video_id, lang = self._extract_url_info(url) | ||||
| @@ -278,3 +288,19 @@ class ArteTVDDCIE(ArteTVPlus7IE): | ||||
|         javascriptPlayerGenerator = self._download_webpage(script_url, video_id, 'Download javascript player generator') | ||||
|         json_url = self._search_regex(r"json_url=(.*)&rendering_place.*", javascriptPlayerGenerator, 'json url') | ||||
|         return self._extract_from_json_url(json_url, video_id, lang) | ||||
|  | ||||
|  | ||||
| class ArteTVConcertIE(ArteTVPlus7IE): | ||||
|     IE_NAME = 'arte.tv:concert' | ||||
|     _VALID_URL = r'https?://concert\.arte\.tv/(?P<lang>de|fr)/(?P<id>.+)' | ||||
|  | ||||
|     _TEST = { | ||||
|         'url': 'http://concert.arte.tv/de/notwist-im-pariser-konzertclub-divan-du-monde', | ||||
|         'md5': '9ea035b7bd69696b67aa2ccaaa218161', | ||||
|         'info_dict': { | ||||
|             'id': '186', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'The Notwist im Pariser Konzertclub "Divan du Monde"', | ||||
|             'upload_date': '20140128', | ||||
|         }, | ||||
|     } | ||||
|   | ||||
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| @@ -7,13 +9,14 @@ from ..utils import ( | ||||
|     ExtractorError, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class AUEngineIE(InfoExtractor): | ||||
|     _TEST = { | ||||
|         u'url': u'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370', | ||||
|         u'file': u'lfvlytY6.mp4', | ||||
|         u'md5': u'48972bdbcf1a3a2f5533e62425b41d4f', | ||||
|         u'info_dict': { | ||||
|             u"title": u"[Commie]The Legend of the Legendary Heroes - 03 - Replication Eye (Alpha Stigma)[F9410F5A]" | ||||
|         'url': 'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370', | ||||
|         'file': 'lfvlytY6.mp4', | ||||
|         'md5': '48972bdbcf1a3a2f5533e62425b41d4f', | ||||
|         'info_dict': { | ||||
|             'title': '[Commie]The Legend of the Legendary Heroes - 03 - Replication Eye (Alpha Stigma)[F9410F5A]' | ||||
|         } | ||||
|     } | ||||
|     _VALID_URL = r'(?:http://)?(?:www\.)?auengine\.com/embed\.php\?.*?file=([^&]+).*?' | ||||
| @@ -23,7 +26,7 @@ class AUEngineIE(InfoExtractor): | ||||
|         video_id = mobj.group(1) | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|         title = self._html_search_regex(r'<title>(?P<title>.+?)</title>', | ||||
|                 webpage, u'title') | ||||
|                 webpage, 'title') | ||||
|         title = title.strip() | ||||
|         links = re.findall(r'\s(?:file|url):\s*["\']([^\'"]+)["\']', webpage) | ||||
|         links = map(compat_urllib_parse.unquote, links) | ||||
| @@ -37,7 +40,7 @@ class AUEngineIE(InfoExtractor): | ||||
|                 video_url = link | ||||
|         if not video_url: | ||||
|             raise ExtractorError(u'Could not find video URL') | ||||
|         ext = u'.' + determine_ext(video_url) | ||||
|         ext = '.' + determine_ext(video_url) | ||||
|         if ext == title[-len(ext):]: | ||||
|             title = title[:-len(ext)] | ||||
|  | ||||
|   | ||||
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
| import itertools | ||||
| @@ -9,26 +11,26 @@ from ..utils import ( | ||||
|  | ||||
|  | ||||
| class BambuserIE(InfoExtractor): | ||||
|     IE_NAME = u'bambuser' | ||||
|     IE_NAME = 'bambuser' | ||||
|     _VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)' | ||||
|     _API_KEY = '005f64509e19a868399060af746a00aa' | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': u'http://bambuser.com/v/4050584', | ||||
|         'url': 'http://bambuser.com/v/4050584', | ||||
|         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388 | ||||
|         #u'md5': u'fba8f7693e48fd4e8641b3fd5539a641', | ||||
|         u'info_dict': { | ||||
|             u'id': u'4050584', | ||||
|             u'ext': u'flv', | ||||
|             u'title': u'Education engineering days - lightning talks', | ||||
|             u'duration': 3741, | ||||
|             u'uploader': u'pixelversity', | ||||
|             u'uploader_id': u'344706', | ||||
|         #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641', | ||||
|         'info_dict': { | ||||
|             'id': '4050584', | ||||
|             'ext': 'flv', | ||||
|             'title': 'Education engineering days - lightning talks', | ||||
|             'duration': 3741, | ||||
|             'uploader': 'pixelversity', | ||||
|             'uploader_id': '344706', | ||||
|         }, | ||||
|         u'params': { | ||||
|         'params': { | ||||
|             # It doesn't respect the 'Range' header, it would download the whole video | ||||
|             # caused the travis builds to fail: https://travis-ci.org/rg3/youtube-dl/jobs/14493845#L59 | ||||
|             u'skip_download': True, | ||||
|             'skip_download': True, | ||||
|         }, | ||||
|     } | ||||
|  | ||||
| @@ -53,7 +55,7 @@ class BambuserIE(InfoExtractor): | ||||
|  | ||||
|  | ||||
| class BambuserChannelIE(InfoExtractor): | ||||
|     IE_NAME = u'bambuser:channel' | ||||
|     IE_NAME = 'bambuser:channel' | ||||
|     _VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)' | ||||
|     # The maximum number we can get with each request | ||||
|     _STEP = 50 | ||||
| @@ -72,7 +74,7 @@ class BambuserChannelIE(InfoExtractor): | ||||
|             # Without setting this header, we wouldn't get any result | ||||
|             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user) | ||||
|             info_json = self._download_webpage(req, user, | ||||
|                 u'Downloading page %d' % i) | ||||
|                 'Downloading page %d' % i) | ||||
|             results = json.loads(info_json)['result'] | ||||
|             if len(results) == 0: | ||||
|                 break | ||||
|   | ||||
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import json | ||||
| import re | ||||
|  | ||||
| @@ -10,16 +12,16 @@ from ..utils import ( | ||||
|  | ||||
|  | ||||
| class BandcampIE(InfoExtractor): | ||||
|     IE_NAME = u'Bandcamp' | ||||
|     _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P<title>.*)' | ||||
|     _TESTS = [{ | ||||
|         u'url': u'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song', | ||||
|         u'file': u'1812978515.mp3', | ||||
|         u'md5': u'cdeb30cdae1921719a3cbcab696ef53c', | ||||
|         u'info_dict': { | ||||
|             u"title": u"youtube-dl test song \"'/\\\u00e4\u21ad" | ||||
|         'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song', | ||||
|         'file': '1812978515.mp3', | ||||
|         'md5': 'c557841d5e50261777a6585648adf439', | ||||
|         'info_dict': { | ||||
|             "title": "youtube-dl  \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad", | ||||
|             "duration": 10, | ||||
|         }, | ||||
|         u'skip': u'There is a limit of 200 free downloads / month for the test song' | ||||
|         '_skip': 'There is a limit of 200 free downloads / month for the test song' | ||||
|     }] | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
| @@ -30,85 +32,98 @@ class BandcampIE(InfoExtractor): | ||||
|         m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage) | ||||
|         if m_download is None: | ||||
|             m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage) | ||||
|         if m_trackinfo: | ||||
|             json_code = m_trackinfo.group(1) | ||||
|             data = json.loads(json_code) | ||||
|             if m_trackinfo: | ||||
|                 json_code = m_trackinfo.group(1) | ||||
|                 data = json.loads(json_code) | ||||
|                 d = data[0] | ||||
|  | ||||
|                 duration = int(round(d['duration'])) | ||||
|                 formats = [] | ||||
|                 for format_id, format_url in d['file'].items(): | ||||
|                     ext, _, abr_str = format_id.partition('-') | ||||
|  | ||||
|                     formats.append({ | ||||
|                         'format_id': format_id, | ||||
|                         'url': format_url, | ||||
|                         'ext': format_id.partition('-')[0], | ||||
|                         'vcodec': 'none', | ||||
|                         'acodec': format_id.partition('-')[0], | ||||
|                         'abr': int(format_id.partition('-')[2]), | ||||
|                     }) | ||||
|  | ||||
|                 self._sort_formats(formats) | ||||
|  | ||||
|             for d in data: | ||||
|                 formats = [{ | ||||
|                     'format_id': 'format_id', | ||||
|                     'url': format_url, | ||||
|                     'ext': format_id.partition('-')[0] | ||||
|                 } for format_id, format_url in sorted(d['file'].items())] | ||||
|                 return { | ||||
|                     'id': compat_str(d['id']), | ||||
|                     'title': d['title'], | ||||
|                     'formats': formats, | ||||
|                     'duration': duration, | ||||
|                 } | ||||
|         else: | ||||
|             raise ExtractorError(u'No free songs found') | ||||
|             else: | ||||
|                 raise ExtractorError('No free songs found') | ||||
|  | ||||
|         download_link = m_download.group(1) | ||||
|         id = re.search(r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',  | ||||
|                        webpage, re.MULTILINE|re.DOTALL).group('id') | ||||
|         video_id = re.search( | ||||
|             r'var TralbumData = {(.*?)id: (?P<id>\d*?)$', | ||||
|             webpage, re.MULTILINE | re.DOTALL).group('id') | ||||
|  | ||||
|         download_webpage = self._download_webpage(download_link, id, | ||||
|         download_webpage = self._download_webpage(download_link, video_id, | ||||
|                                                   'Downloading free downloads page') | ||||
|         # We get the dictionary of the track from some javascrip code | ||||
|         info = re.search(r'items: (.*?),$', | ||||
|                          download_webpage, re.MULTILINE).group(1) | ||||
|         info = json.loads(info)[0] | ||||
|         # We pick mp3-320 for now, until format selection can be easily implemented. | ||||
|         mp3_info = info[u'downloads'][u'mp3-320'] | ||||
|         mp3_info = info['downloads']['mp3-320'] | ||||
|         # If we try to use this url it says the link has expired | ||||
|         initial_url = mp3_info[u'url'] | ||||
|         initial_url = mp3_info['url'] | ||||
|         re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$' | ||||
|         m_url = re.match(re_url, initial_url) | ||||
|         #We build the url we will use to get the final track url | ||||
|         # This url is build in Bandcamp in the script download_bunde_*.js | ||||
|         request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), id, m_url.group('ts')) | ||||
|         final_url_webpage = self._download_webpage(request_url, id, 'Requesting download url') | ||||
|         request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts')) | ||||
|         final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url') | ||||
|         # If we could correctly generate the .rand field the url would be | ||||
|         #in the "download_url" key | ||||
|         final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1) | ||||
|  | ||||
|         track_info = {'id':id, | ||||
|                       'title' : info[u'title'], | ||||
|                       'ext' :   'mp3', | ||||
|                       'url' :   final_url, | ||||
|                       'thumbnail' : info[u'thumb_url'], | ||||
|                       'uploader' :  info[u'artist'] | ||||
|                       } | ||||
|  | ||||
|         return [track_info] | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'title': info['title'], | ||||
|             'ext': 'mp3', | ||||
|             'vcodec': 'none', | ||||
|             'url': final_url, | ||||
|             'thumbnail': info.get('thumb_url'), | ||||
|             'uploader': info.get('artist'), | ||||
|         } | ||||
|  | ||||
|  | ||||
| class BandcampAlbumIE(InfoExtractor): | ||||
|     IE_NAME = u'Bandcamp:album' | ||||
|     IE_NAME = 'Bandcamp:album' | ||||
|     _VALID_URL = r'http://.*?\.bandcamp\.com/album/(?P<title>.*)' | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': u'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1', | ||||
|         u'playlist': [ | ||||
|         'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1', | ||||
|         'playlist': [ | ||||
|             { | ||||
|                 u'file': u'1353101989.mp3', | ||||
|                 u'md5': u'39bc1eded3476e927c724321ddf116cf', | ||||
|                 u'info_dict': { | ||||
|                     u'title': u'Intro', | ||||
|                 'file': '1353101989.mp3', | ||||
|                 'md5': '39bc1eded3476e927c724321ddf116cf', | ||||
|                 'info_dict': { | ||||
|                     'title': 'Intro', | ||||
|                 } | ||||
|             }, | ||||
|             { | ||||
|                 u'file': u'38097443.mp3', | ||||
|                 u'md5': u'1a2c32e2691474643e912cc6cd4bffaa', | ||||
|                 u'info_dict': { | ||||
|                     u'title': u'Kero One - Keep It Alive (Blazo remix)', | ||||
|                 'file': '38097443.mp3', | ||||
|                 'md5': '1a2c32e2691474643e912cc6cd4bffaa', | ||||
|                 'info_dict': { | ||||
|                     'title': 'Kero One - Keep It Alive (Blazo remix)', | ||||
|                 } | ||||
|             }, | ||||
|         ], | ||||
|         u'params': { | ||||
|             u'playlistend': 2 | ||||
|         'params': { | ||||
|             'playlistend': 2 | ||||
|         }, | ||||
|         u'skip': u'Bancamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test' | ||||
|         'skip': 'Bancamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test' | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
| @@ -117,11 +132,11 @@ class BandcampAlbumIE(InfoExtractor): | ||||
|         webpage = self._download_webpage(url, title) | ||||
|         tracks_paths = re.findall(r'<a href="(.*?)" itemprop="url">', webpage) | ||||
|         if not tracks_paths: | ||||
|             raise ExtractorError(u'The page doesn\'t contain any track') | ||||
|             raise ExtractorError('The page doesn\'t contain any tracks') | ||||
|         entries = [ | ||||
|             self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key()) | ||||
|             for t_path in tracks_paths] | ||||
|         title = self._search_regex(r'album_title : "(.*?)"', webpage, u'title') | ||||
|         title = self._search_regex(r'album_title : "(.*?)"', webpage, 'title') | ||||
|         return { | ||||
|             '_type': 'playlist', | ||||
|             'title': title, | ||||
|   | ||||
							
								
								
									
										223
									
								
								youtube_dl/extractor/bbccouk.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										223
									
								
								youtube_dl/extractor/bbccouk.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,223 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .subtitles import SubtitlesInfoExtractor | ||||
| from ..utils import ExtractorError | ||||
|  | ||||
|  | ||||
| class BBCCoUkIE(SubtitlesInfoExtractor): | ||||
|     IE_NAME = 'bbc.co.uk' | ||||
|     IE_DESC = 'BBC iPlayer' | ||||
|     _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:programmes|iplayer/episode)/(?P<id>[\da-z]{8})' | ||||
|  | ||||
|     _TESTS = [ | ||||
|         { | ||||
|             'url': 'http://www.bbc.co.uk/programmes/b039g8p7', | ||||
|             'info_dict': { | ||||
|                 'id': 'b039d07m', | ||||
|                 'ext': 'flv', | ||||
|                 'title': 'Kaleidoscope: Leonard Cohen', | ||||
|                 'description': 'md5:db4755d7a665ae72343779f7dacb402c', | ||||
|                 'duration': 1740, | ||||
|             }, | ||||
|             'params': { | ||||
|                 # rtmp download | ||||
|                 'skip_download': True, | ||||
|             } | ||||
|         }, | ||||
|         { | ||||
|             'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/', | ||||
|             'info_dict': { | ||||
|                 'id': 'b00yng1d', | ||||
|                 'ext': 'flv', | ||||
|                 'title': 'The Man in Black: Series 3: The Printed Name', | ||||
|                 'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.", | ||||
|                 'duration': 1800, | ||||
|             }, | ||||
|             'params': { | ||||
|                 # rtmp download | ||||
|                 'skip_download': True, | ||||
|             }, | ||||
|             'skip': 'Episode is no longer available on BBC iPlayer Radio', | ||||
|         }, | ||||
|         { | ||||
|             'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/', | ||||
|             'info_dict': { | ||||
|                 'id': 'b00yng1d', | ||||
|                 'ext': 'flv', | ||||
|                 'title': 'The Voice UK: Series 3: Blind Auditions 5', | ||||
|                 'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.", | ||||
|                 'duration': 5100, | ||||
|             }, | ||||
|             'params': { | ||||
|                 # rtmp download | ||||
|                 'skip_download': True, | ||||
|             }, | ||||
|             'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', | ||||
|         } | ||||
|     ] | ||||
|  | ||||
|     def _extract_asx_playlist(self, connection, programme_id): | ||||
|         asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist') | ||||
|         return [ref.get('href') for ref in asx.findall('./Entry/ref')] | ||||
|  | ||||
|     def _extract_connection(self, connection, programme_id): | ||||
|         formats = [] | ||||
|         protocol = connection.get('protocol') | ||||
|         supplier = connection.get('supplier') | ||||
|         if protocol == 'http': | ||||
|             href = connection.get('href') | ||||
|             # ASX playlist | ||||
|             if supplier == 'asx': | ||||
|                 for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)): | ||||
|                     formats.append({ | ||||
|                         'url': ref, | ||||
|                         'format_id': 'ref%s_%s' % (i, supplier), | ||||
|                     }) | ||||
|             # Direct link | ||||
|             else: | ||||
|                 formats.append({ | ||||
|                     'url': href, | ||||
|                     'format_id': supplier, | ||||
|                 }) | ||||
|         elif protocol == 'rtmp': | ||||
|             application = connection.get('application', 'ondemand') | ||||
|             auth_string = connection.get('authString') | ||||
|             identifier = connection.get('identifier') | ||||
|             server = connection.get('server') | ||||
|             formats.append({ | ||||
|                 'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string), | ||||
|                 'play_path': identifier, | ||||
|                 'app': '%s?%s' % (application, auth_string), | ||||
|                 'page_url': 'http://www.bbc.co.uk', | ||||
|                 'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf', | ||||
|                 'rtmp_live': False, | ||||
|                 'ext': 'flv', | ||||
|                 'format_id': supplier, | ||||
|             }) | ||||
|         return formats | ||||
|  | ||||
|     def _extract_items(self, playlist): | ||||
|         return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item') | ||||
|  | ||||
|     def _extract_medias(self, media_selection): | ||||
|         return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media') | ||||
|  | ||||
|     def _extract_connections(self, media): | ||||
|         return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection') | ||||
|  | ||||
|     def _extract_video(self, media, programme_id): | ||||
|         formats = [] | ||||
|         vbr = int(media.get('bitrate')) | ||||
|         vcodec = media.get('encoding') | ||||
|         service = media.get('service') | ||||
|         width = int(media.get('width')) | ||||
|         height = int(media.get('height')) | ||||
|         file_size = int(media.get('media_file_size')) | ||||
|         for connection in self._extract_connections(media): | ||||
|             conn_formats = self._extract_connection(connection, programme_id) | ||||
|             for format in conn_formats: | ||||
|                 format.update({ | ||||
|                     'format_id': '%s_%s' % (service, format['format_id']), | ||||
|                     'width': width, | ||||
|                     'height': height, | ||||
|                     'vbr': vbr, | ||||
|                     'vcodec': vcodec, | ||||
|                     'filesize': file_size, | ||||
|                 }) | ||||
|             formats.extend(conn_formats) | ||||
|         return formats | ||||
|  | ||||
|     def _extract_audio(self, media, programme_id): | ||||
|         formats = [] | ||||
|         abr = int(media.get('bitrate')) | ||||
|         acodec = media.get('encoding') | ||||
|         service = media.get('service') | ||||
|         for connection in self._extract_connections(media): | ||||
|             conn_formats = self._extract_connection(connection, programme_id) | ||||
|             for format in conn_formats: | ||||
|                 format.update({ | ||||
|                     'format_id': '%s_%s' % (service, format['format_id']), | ||||
|                     'abr': abr, | ||||
|                     'acodec': acodec, | ||||
|                 }) | ||||
|             formats.extend(conn_formats) | ||||
|         return formats | ||||
|  | ||||
|     def _extract_captions(self, media, programme_id): | ||||
|         subtitles = {} | ||||
|         for connection in self._extract_connections(media): | ||||
|             captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions') | ||||
|             lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en') | ||||
|             ps = captions.findall('./{0}body/{0}div/{0}p'.format('{http://www.w3.org/2006/10/ttaf1}')) | ||||
|             srt = '' | ||||
|             for pos, p in enumerate(ps): | ||||
|                 srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (str(pos), p.get('begin'), p.get('end'), | ||||
|                                                           p.text.strip() if p.text is not None else '') | ||||
|             subtitles[lang] = srt | ||||
|         return subtitles | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         group_id = mobj.group('id') | ||||
|  | ||||
|         webpage = self._download_webpage(url, group_id, 'Downloading video page') | ||||
|         if re.search(r'id="emp-error" class="notinuk">', webpage): | ||||
|             raise ExtractorError('Currently BBC iPlayer TV programmes are available to play in the UK only', | ||||
|                 expected=True) | ||||
|  | ||||
|         playlist = self._download_xml('http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, group_id, | ||||
|             'Downloading playlist XML') | ||||
|  | ||||
|         no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems') | ||||
|         if no_items is not None: | ||||
|             reason = no_items.get('reason') | ||||
|             if reason == 'preAvailability': | ||||
|                 msg = 'Episode %s is not yet available' % group_id | ||||
|             elif reason == 'postAvailability': | ||||
|                 msg = 'Episode %s is no longer available' % group_id | ||||
|             else: | ||||
|                 msg = 'Episode %s is not available: %s' % (group_id, reason) | ||||
|             raise ExtractorError(msg, expected=True) | ||||
|  | ||||
|         formats = [] | ||||
|         subtitles = None | ||||
|  | ||||
|         for item in self._extract_items(playlist): | ||||
|             kind = item.get('kind') | ||||
|             if kind != 'programme' and kind != 'radioProgramme': | ||||
|                 continue | ||||
|             title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text | ||||
|             description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text | ||||
|  | ||||
|             programme_id = item.get('identifier') | ||||
|             duration = int(item.get('duration')) | ||||
|  | ||||
|             media_selection = self._download_xml( | ||||
|                 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s'  % programme_id, | ||||
|                 programme_id, 'Downloading media selection XML') | ||||
|  | ||||
|             for media in self._extract_medias(media_selection): | ||||
|                 kind = media.get('kind') | ||||
|                 if kind == 'audio': | ||||
|                     formats.extend(self._extract_audio(media, programme_id)) | ||||
|                 elif kind == 'video': | ||||
|                     formats.extend(self._extract_video(media, programme_id)) | ||||
|                 elif kind == 'captions': | ||||
|                     subtitles = self._extract_captions(media, programme_id) | ||||
|  | ||||
|         if self._downloader.params.get('listsubtitles', False): | ||||
|             self._list_available_subtitles(programme_id, subtitles) | ||||
|             return | ||||
|  | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return { | ||||
|             'id': programme_id, | ||||
|             'title': title, | ||||
|             'description': description, | ||||
|             'duration': duration, | ||||
|             'formats': formats, | ||||
|             'subtitles': subtitles, | ||||
|         } | ||||
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import datetime | ||||
| import json | ||||
| import re | ||||
| @@ -10,19 +12,19 @@ from ..utils import ( | ||||
|  | ||||
| class BlinkxIE(InfoExtractor): | ||||
|     _VALID_URL = r'^(?:https?://(?:www\.)blinkx\.com/#?ce/|blinkx:)(?P<id>[^?]+)' | ||||
|     _IE_NAME = u'blinkx' | ||||
|     IE_NAME = 'blinkx' | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.blinkx.com/ce/8aQUy7GVFYgFzpKhT0oqsilwOGFRVXk3R1ZGWWdGenBLaFQwb3FzaWx3OGFRVXk3R1ZGWWdGenB', | ||||
|         u'file': u'8aQUy7GV.mp4', | ||||
|         u'md5': u'2e9a07364af40163a908edbf10bb2492', | ||||
|         u'info_dict': { | ||||
|             u"title": u"Police Car Rolls Away", | ||||
|             u"uploader": u"stupidvideos.com", | ||||
|             u"upload_date": u"20131215", | ||||
|             u"description": u"A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!", | ||||
|             u"duration": 14.886, | ||||
|             u"thumbnails": [{ | ||||
|         'url': 'http://www.blinkx.com/ce/8aQUy7GVFYgFzpKhT0oqsilwOGFRVXk3R1ZGWWdGenBLaFQwb3FzaWx3OGFRVXk3R1ZGWWdGenB', | ||||
|         'file': '8aQUy7GV.mp4', | ||||
|         'md5': '2e9a07364af40163a908edbf10bb2492', | ||||
|         'info_dict': { | ||||
|             "title": "Police Car Rolls Away", | ||||
|             "uploader": "stupidvideos.com", | ||||
|             "upload_date": "20131215", | ||||
|             "description": "A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!", | ||||
|             "duration": 14.886, | ||||
|             "thumbnails": [{ | ||||
|                 "width": 100, | ||||
|                 "height": 76, | ||||
|                 "url": "http://cdn.blinkx.com/stream/b/41/StupidVideos/20131215/1873969261/1873969261_tn_0.jpg", | ||||
| @@ -30,17 +32,17 @@ class BlinkxIE(InfoExtractor): | ||||
|         }, | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         m = re.match(self._VALID_URL, url) | ||||
|     def _real_extract(self, rl): | ||||
|         m = re.match(self._VALID_URL, rl) | ||||
|         video_id = m.group('id') | ||||
|         display_id = video_id[:8] | ||||
|  | ||||
|         api_url = (u'https://apib4.blinkx.com/api.php?action=play_video&' + | ||||
|                    u'video=%s' % video_id) | ||||
|                    'video=%s' % video_id) | ||||
|         data_json = self._download_webpage(api_url, display_id) | ||||
|         data = json.loads(data_json)['api']['results'][0] | ||||
|         dt = datetime.datetime.fromtimestamp(data['pubdate_epoch']) | ||||
|         upload_date = dt.strftime('%Y%m%d') | ||||
|         pload_date = dt.strftime('%Y%m%d') | ||||
|  | ||||
|         duration = None | ||||
|         thumbnails = [] | ||||
| @@ -61,9 +63,10 @@ class BlinkxIE(InfoExtractor): | ||||
|             elif m['type'] in ('flv', 'mp4'): | ||||
|                 vcodec = remove_start(m['vcodec'], 'ff') | ||||
|                 acodec = remove_start(m['acodec'], 'ff') | ||||
|                 tbr = (int(m['vbr']) + int(m['abr'])) // 1000 | ||||
|                 format_id = (u'%s-%sk-%s' % | ||||
|                              (vcodec, | ||||
|                               (int(m['vbr']) + int(m['abr'])) // 1000, | ||||
|                               tbr, | ||||
|                               m['w'])) | ||||
|                 formats.append({ | ||||
|                     'format_id': format_id, | ||||
| @@ -72,10 +75,12 @@ class BlinkxIE(InfoExtractor): | ||||
|                     'acodec': acodec, | ||||
|                     'abr': int(m['abr']) // 1000, | ||||
|                     'vbr': int(m['vbr']) // 1000, | ||||
|                     'tbr': tbr, | ||||
|                     'width': int(m['w']), | ||||
|                     'height': int(m['h']), | ||||
|                 }) | ||||
|         formats.sort(key=lambda f: (f['width'], f['vbr'], f['abr'])) | ||||
|  | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return { | ||||
|             'id': display_id, | ||||
| @@ -83,7 +88,7 @@ class BlinkxIE(InfoExtractor): | ||||
|             'title': data['title'], | ||||
|             'formats': formats, | ||||
|             'uploader': data['channel_name'], | ||||
|             'upload_date': upload_date, | ||||
|             'upload_date': pload_date, | ||||
|             'description': data.get('description'), | ||||
|             'thumbnails': thumbnails, | ||||
|             'duration': duration, | ||||
|   | ||||
| @@ -1,156 +1,145 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import datetime | ||||
| import json | ||||
| import os | ||||
| import re | ||||
| import socket | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from .subtitles import SubtitlesInfoExtractor | ||||
| from ..utils import ( | ||||
|     compat_http_client, | ||||
|     compat_parse_qs, | ||||
|     compat_str, | ||||
|     compat_urllib_error, | ||||
|     compat_urllib_parse_urlparse, | ||||
|     compat_urllib_request, | ||||
|  | ||||
|     ExtractorError, | ||||
|     unescapeHTML, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class BlipTVIE(InfoExtractor): | ||||
| class BlipTVIE(SubtitlesInfoExtractor): | ||||
|     """Information extractor for blip.tv""" | ||||
|  | ||||
|     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$' | ||||
|     _URL_EXT = r'^.*\.([a-z0-9]+)$' | ||||
|     IE_NAME = u'blip.tv' | ||||
|     _TEST = { | ||||
|         u'url': u'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352', | ||||
|         u'file': u'5779306.m4v', | ||||
|         u'md5': u'80baf1ec5c3d2019037c1c707d676b9f', | ||||
|         u'info_dict': { | ||||
|             u"upload_date": u"20111205",  | ||||
|             u"description": u"md5:9bc31f227219cde65e47eeec8d2dc596",  | ||||
|             u"uploader": u"Comic Book Resources - CBR TV",  | ||||
|             u"title": u"CBR EXCLUSIVE: \"Gotham City Imposters\" Bats VS Jokerz Short 3" | ||||
|         } | ||||
|     } | ||||
|     _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(?P<presumptive_id>.+)$' | ||||
|  | ||||
|     def report_direct_download(self, title): | ||||
|         """Report information extraction.""" | ||||
|         self.to_screen(u'%s: Direct download detected' % title) | ||||
|     _TESTS = [{ | ||||
|         'url': 'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352', | ||||
|         'md5': 'c6934ad0b6acf2bd920720ec888eb812', | ||||
|         'info_dict': { | ||||
|             'id': '5779306', | ||||
|             'ext': 'mov', | ||||
|             'upload_date': '20111205', | ||||
|             'description': 'md5:9bc31f227219cde65e47eeec8d2dc596', | ||||
|             'uploader': 'Comic Book Resources - CBR TV', | ||||
|             'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3', | ||||
|         } | ||||
|     }, { | ||||
|         # https://github.com/rg3/youtube-dl/pull/2274 | ||||
|         'note': 'Video with subtitles', | ||||
|         'url': 'http://blip.tv/play/h6Uag5OEVgI.html', | ||||
|         'md5': '309f9d25b820b086ca163ffac8031806', | ||||
|         'info_dict': { | ||||
|             'id': '6586561', | ||||
|             'ext': 'mp4', | ||||
|             'uploader': 'Red vs. Blue', | ||||
|             'description': 'One-Zero-One', | ||||
|             'upload_date': '20130614', | ||||
|             'title': 'Red vs. Blue Season 11 Episode 1', | ||||
|         } | ||||
|     }] | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         if mobj is None: | ||||
|             raise ExtractorError(u'Invalid URL: %s' % url) | ||||
|         presumptive_id = mobj.group('presumptive_id') | ||||
|  | ||||
|         # See https://github.com/rg3/youtube-dl/issues/857 | ||||
|         api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url) | ||||
|         if api_mobj is not None: | ||||
|             url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id') | ||||
|         urlp = compat_urllib_parse_urlparse(url) | ||||
|         if urlp.path.startswith('/play/'): | ||||
|             response = self._request_webpage(url, None, False) | ||||
|             redirecturl = response.geturl() | ||||
|             rurlp = compat_urllib_parse_urlparse(redirecturl) | ||||
|             file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2] | ||||
|             url = 'http://blip.tv/a/a-' + file_id | ||||
|             return self._real_extract(url) | ||||
|  | ||||
|  | ||||
|         if '?' in url: | ||||
|             cchar = '&' | ||||
|         else: | ||||
|             cchar = '?' | ||||
|         embed_mobj = re.match(r'https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)([a-zA-Z0-9]+)', url) | ||||
|         if embed_mobj: | ||||
|             info_url = 'http://blip.tv/play/%s.x?p=1' % embed_mobj.group(1) | ||||
|             info_page = self._download_webpage(info_url, embed_mobj.group(1)) | ||||
|             video_id = self._search_regex( | ||||
|                 r'data-episode-id="([0-9]+)', info_page, 'video_id') | ||||
|             return self.url_result('http://blip.tv/a/a-' + video_id, 'BlipTV') | ||||
|          | ||||
|         cchar = '&' if '?' in url else '?' | ||||
|         json_url = url + cchar + 'skin=json&version=2&no_wrap=1' | ||||
|         request = compat_urllib_request.Request(json_url) | ||||
|         request.add_header('User-Agent', 'iTunes/10.6.1') | ||||
|         self.report_extraction(mobj.group(1)) | ||||
|         info = None | ||||
|         urlh = self._request_webpage(request, None, False, | ||||
|             u'unable to download video info webpage') | ||||
|  | ||||
|         if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download | ||||
|             basename = url.split('/')[-1] | ||||
|             title,ext = os.path.splitext(basename) | ||||
|             title = title.decode('UTF-8') | ||||
|             ext = ext.replace('.', '') | ||||
|             self.report_direct_download(title) | ||||
|             return { | ||||
|                 'id': title, | ||||
|                 'url': url, | ||||
|                 'uploader': None, | ||||
|                 'upload_date': None, | ||||
|                 'title': title, | ||||
|                 'ext': ext, | ||||
|                 'urlhandle': urlh | ||||
|             } | ||||
|         json_data = self._download_json(request, video_id=presumptive_id) | ||||
|  | ||||
|         try: | ||||
|             json_code_bytes = urlh.read() | ||||
|             json_code = json_code_bytes.decode('utf-8') | ||||
|         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: | ||||
|             raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err)) | ||||
|         if 'Post' in json_data: | ||||
|             data = json_data['Post'] | ||||
|         else: | ||||
|             data = json_data | ||||
|  | ||||
|         try: | ||||
|             json_data = json.loads(json_code) | ||||
|             if 'Post' in json_data: | ||||
|                 data = json_data['Post'] | ||||
|             else: | ||||
|                 data = json_data | ||||
|         video_id = compat_str(data['item_id']) | ||||
|         upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d') | ||||
|         subtitles = {} | ||||
|         formats = [] | ||||
|         if 'additionalMedia' in data: | ||||
|             for f in data['additionalMedia']: | ||||
|                 if f.get('file_type_srt') == 1: | ||||
|                     LANGS = { | ||||
|                         'english': 'en', | ||||
|                     } | ||||
|                     lang = f['role'].rpartition('-')[-1].strip().lower() | ||||
|                     langcode = LANGS.get(lang, lang) | ||||
|                     subtitles[langcode] = f['url'] | ||||
|                     continue | ||||
|                 if not int(f['media_width']):  # filter m3u8 | ||||
|                     continue | ||||
|                 formats.append({ | ||||
|                     'url': f['url'], | ||||
|                     'format_id': f['role'], | ||||
|                     'width': int(f['media_width']), | ||||
|                     'height': int(f['media_height']), | ||||
|                 }) | ||||
|         else: | ||||
|             formats.append({ | ||||
|                 'url': data['media']['url'], | ||||
|                 'width': int(data['media']['width']), | ||||
|                 'height': int(data['media']['height']), | ||||
|             }) | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|             upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d') | ||||
|             if 'additionalMedia' in data: | ||||
|                 formats = sorted(data['additionalMedia'], key=lambda f: int(f['media_height'])) | ||||
|                 best_format = formats[-1] | ||||
|                 video_url = best_format['url'] | ||||
|             else: | ||||
|                 video_url = data['media']['url'] | ||||
|             umobj = re.match(self._URL_EXT, video_url) | ||||
|             if umobj is None: | ||||
|                 raise ValueError('Can not determine filename extension') | ||||
|             ext = umobj.group(1) | ||||
|         # subtitles | ||||
|         video_subtitles = self.extract_subtitles(video_id, subtitles) | ||||
|         if self._downloader.params.get('listsubtitles', False): | ||||
|             self._list_available_subtitles(video_id, subtitles) | ||||
|             return | ||||
|  | ||||
|             return { | ||||
|                 'id': compat_str(data['item_id']), | ||||
|                 'url': video_url, | ||||
|                 'uploader': data['display_name'], | ||||
|                 'upload_date': upload_date, | ||||
|                 'title': data['title'], | ||||
|                 'ext': ext, | ||||
|                 'format': data['media']['mimeType'], | ||||
|                 'thumbnail': data['thumbnailUrl'], | ||||
|                 'description': data['description'], | ||||
|                 'player_url': data['embedUrl'], | ||||
|                 'user_agent': 'iTunes/10.6.1', | ||||
|             } | ||||
|         except (ValueError, KeyError) as err: | ||||
|             raise ExtractorError(u'Unable to parse video information: %s' % repr(err)) | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'uploader': data['display_name'], | ||||
|             'upload_date': upload_date, | ||||
|             'title': data['title'], | ||||
|             'thumbnail': data['thumbnailUrl'], | ||||
|             'description': data['description'], | ||||
|             'user_agent': 'iTunes/10.6.1', | ||||
|             'formats': formats, | ||||
|             'subtitles': video_subtitles, | ||||
|         } | ||||
|  | ||||
|     def _download_subtitle_url(self, sub_lang, url): | ||||
|         # For some weird reason, blip.tv serves a video instead of subtitles | ||||
|         # when we request with a common UA | ||||
|         req = compat_urllib_request.Request(url) | ||||
|         req.add_header('Youtubedl-user-agent', 'youtube-dl') | ||||
|         return self._download_webpage(req, None, note=False) | ||||
|  | ||||
|  | ||||
| class BlipTVUserIE(InfoExtractor): | ||||
|     """Information Extractor for blip.tv users.""" | ||||
|  | ||||
|     _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$' | ||||
|     _PAGE_SIZE = 12 | ||||
|     IE_NAME = u'blip.tv:user' | ||||
|     IE_NAME = 'blip.tv:user' | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         # Extract username | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         if mobj is None: | ||||
|             raise ExtractorError(u'Invalid URL: %s' % url) | ||||
|  | ||||
|         username = mobj.group(1) | ||||
|  | ||||
|         page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1' | ||||
|  | ||||
|         page = self._download_webpage(url, username, u'Downloading user page') | ||||
|         page = self._download_webpage(url, username, 'Downloading user page') | ||||
|         mobj = re.search(r'data-users-id="([^"]+)"', page) | ||||
|         page_base = page_base % mobj.group(1) | ||||
|  | ||||
|  | ||||
|         # Download video ids using BlipTV Ajax calls. Result size per | ||||
|         # query is limited (currently to 12 videos) so we need to query | ||||
|         # page by page until there are no video ids - it means we got | ||||
| @@ -161,8 +150,8 @@ class BlipTVUserIE(InfoExtractor): | ||||
|  | ||||
|         while True: | ||||
|             url = page_base + "&page=" + str(pagenum) | ||||
|             page = self._download_webpage(url, username, | ||||
|                                           u'Downloading video ids from page %d' % pagenum) | ||||
|             page = self._download_webpage( | ||||
|                 url, username, 'Downloading video ids from page %d' % pagenum) | ||||
|  | ||||
|             # Extract video identifiers | ||||
|             ids_in_page = [] | ||||
| @@ -184,6 +173,6 @@ class BlipTVUserIE(InfoExtractor): | ||||
|  | ||||
|             pagenum += 1 | ||||
|  | ||||
|         urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids] | ||||
|         urls = ['http://blip.tv/%s' % video_id for video_id in video_ids] | ||||
|         url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls] | ||||
|         return [self.playlist_result(url_entries, playlist_title = username)] | ||||
|         return [self.playlist_result(url_entries, playlist_title=username)] | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from .ooyala import OoyalaIE | ||||
|  | ||||
|  | ||||
| class BloombergIE(InfoExtractor): | ||||
| @@ -23,5 +24,7 @@ class BloombergIE(InfoExtractor): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         name = mobj.group('name') | ||||
|         webpage = self._download_webpage(url, name) | ||||
|         ooyala_url = self._og_search_video_url(webpage) | ||||
|         return self.url_result(ooyala_url, ie='Ooyala') | ||||
|         embed_code = self._search_regex( | ||||
|             r'<source src="https?://[^/]+/[^/]+/[^/]+/([^/]+)', webpage, | ||||
|             'embed code') | ||||
|         return OoyalaIE._build_url_result(embed_code) | ||||
|   | ||||
							
								
								
									
										99
									
								
								youtube_dl/extractor/br.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										99
									
								
								youtube_dl/extractor/br.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,99 @@ | ||||
| # coding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ExtractorError | ||||
|  | ||||
|  | ||||
| class BRIE(InfoExtractor): | ||||
|     IE_DESC = "Bayerischer Rundfunk Mediathek" | ||||
|     _VALID_URL = r"^https?://(?:www\.)?br\.de/mediathek/video/(?:sendungen/)?(?:[a-z0-9\-/]+/)?(?P<id>[a-z0-9\-]+)\.html$" | ||||
|     _BASE_URL = "http://www.br.de" | ||||
|  | ||||
|     _TESTS = [ | ||||
|         { | ||||
|             "url": "http://www.br.de/mediathek/video/anselm-gruen-114.html", | ||||
|             "md5": "c4f83cf0f023ba5875aba0bf46860df2", | ||||
|             "info_dict": { | ||||
|                 "id": "2c8d81c5-6fb7-4a74-88d4-e768e5856532", | ||||
|                 "ext": "mp4", | ||||
|                 "title": "Feiern und Verzichten", | ||||
|                 "description": "Anselm Grün: Feiern und Verzichten", | ||||
|                 "uploader": "BR/Birgit Baier", | ||||
|                 "upload_date": "20140301" | ||||
|             } | ||||
|         }, | ||||
|         { | ||||
|             "url": "http://www.br.de/mediathek/video/sendungen/unter-unserem-himmel/unter-unserem-himmel-alpen-ueber-den-pass-100.html", | ||||
|             "md5": "ab451b09d861dbed7d7cc9ab0be19ebe", | ||||
|             "info_dict": { | ||||
|                 "id": "2c060e69-3a27-4e13-b0f0-668fac17d812", | ||||
|                 "ext": "mp4", | ||||
|                 "title": "Über den Pass", | ||||
|                 "description": "Die Eroberung der Alpen: Über den Pass", | ||||
|                 "uploader": None, | ||||
|                 "upload_date": None | ||||
|             } | ||||
|         } | ||||
|     ] | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         display_id = mobj.group('id') | ||||
|         page = self._download_webpage(url, display_id) | ||||
|         xml_url = self._search_regex( | ||||
|             r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/mediathek/video/[a-z0-9/~_.-]+)'}\)\);", page, "XMLURL") | ||||
|         xml = self._download_xml(self._BASE_URL + xml_url, None) | ||||
|  | ||||
|         videos = [] | ||||
|         for xml_video in xml.findall("video"): | ||||
|             video = { | ||||
|                 "id": xml_video.get("externalId"), | ||||
|                 "title": xml_video.find("title").text, | ||||
|                 "formats": self._extract_formats(xml_video.find("assets")), | ||||
|                 "thumbnails": self._extract_thumbnails(xml_video.find("teaserImage/variants")), | ||||
|                 "description": " ".join(xml_video.find("shareTitle").text.splitlines()), | ||||
|                 "webpage_url": xml_video.find("permalink").text | ||||
|             } | ||||
|             if xml_video.find("author").text: | ||||
|                 video["uploader"] = xml_video.find("author").text | ||||
|             if xml_video.find("broadcastDate").text: | ||||
|                 video["upload_date"] =  "".join(reversed(xml_video.find("broadcastDate").text.split("."))) | ||||
|             videos.append(video) | ||||
|  | ||||
|         if len(videos) > 1: | ||||
|             self._downloader.report_warning( | ||||
|                 'found multiple videos; please ' | ||||
|                 'report this with the video URL to http://yt-dl.org/bug') | ||||
|         if not videos: | ||||
|             raise ExtractorError('No video entries found') | ||||
|         return videos[0] | ||||
|  | ||||
|     def _extract_formats(self, assets): | ||||
|         formats = [{ | ||||
|             "url": asset.find("downloadUrl").text, | ||||
|             "ext": asset.find("mediaType").text, | ||||
|             "format_id": asset.get("type"), | ||||
|             "width": int(asset.find("frameWidth").text), | ||||
|             "height": int(asset.find("frameHeight").text), | ||||
|             "tbr": int(asset.find("bitrateVideo").text), | ||||
|             "abr": int(asset.find("bitrateAudio").text), | ||||
|             "vcodec": asset.find("codecVideo").text, | ||||
|             "container": asset.find("mediaType").text, | ||||
|             "filesize": int(asset.find("size").text), | ||||
|         } for asset in assets.findall("asset") | ||||
|             if asset.find("downloadUrl") is not None] | ||||
|  | ||||
|         self._sort_formats(formats) | ||||
|         return formats | ||||
|  | ||||
|     def _extract_thumbnails(self, variants): | ||||
|         thumbnails = [{ | ||||
|             "url": self._BASE_URL + variant.find("url").text, | ||||
|             "width": int(variant.find("width").text), | ||||
|             "height": int(variant.find("height").text), | ||||
|         } for variant in variants.findall("variant")] | ||||
|         thumbnails.sort(key=lambda x: x["width"] * x["height"], reverse=True) | ||||
|         return thumbnails | ||||
| @@ -1,18 +1,20 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import determine_ext | ||||
|  | ||||
|  | ||||
| class BreakIE(InfoExtractor): | ||||
|     _VALID_URL = r'(?:http://)?(?:www\.)?break\.com/video/([^/]+)' | ||||
|     _VALID_URL = r'http://(?:www\.)?break\.com/video/([^/]+)' | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.break.com/video/when-girls-act-like-guys-2468056', | ||||
|         u'file': u'2468056.mp4', | ||||
|         u'md5': u'a3513fb1547fba4fb6cfac1bffc6c46b', | ||||
|         u'info_dict': { | ||||
|             u"title": u"When Girls Act Like D-Bags" | ||||
|         'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056', | ||||
|         'md5': 'a3513fb1547fba4fb6cfac1bffc6c46b', | ||||
|         'info_dict': { | ||||
|             'id': '2468056', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'When Girls Act Like D-Bags', | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -21,18 +23,17 @@ class BreakIE(InfoExtractor): | ||||
|         video_id = mobj.group(1).split("-")[-1] | ||||
|         embed_url = 'http://www.break.com/embed/%s' % video_id | ||||
|         webpage = self._download_webpage(embed_url, video_id) | ||||
|         info_json = self._search_regex(r'var embedVars = ({.*?});', webpage, | ||||
|                                        u'info json', flags=re.DOTALL) | ||||
|         info_json = self._search_regex(r'var embedVars = ({.*})\s*?</script>', | ||||
|             webpage, 'info json', flags=re.DOTALL) | ||||
|         info = json.loads(info_json) | ||||
|         video_url = info['videoUri'] | ||||
|         m_youtube = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', video_url) | ||||
|         if m_youtube is not None: | ||||
|             return self.url_result(m_youtube.group(1), 'Youtube') | ||||
|         final_url = video_url + '?' + info['AuthToken'] | ||||
|         return [{ | ||||
|             'id':        video_id, | ||||
|             'url':       final_url, | ||||
|             'ext':       determine_ext(final_url), | ||||
|             'title':     info['contentName'], | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'url': final_url, | ||||
|             'title': info['contentName'], | ||||
|             'thumbnail': info['thumbUri'], | ||||
|         }] | ||||
|         } | ||||
|   | ||||
| @@ -1,4 +1,5 @@ | ||||
| # encoding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
| @@ -8,65 +9,68 @@ from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     compat_urllib_parse, | ||||
|     find_xpath_attr, | ||||
|     fix_xml_ampersands, | ||||
|     compat_urlparse, | ||||
|     compat_str, | ||||
|     compat_urllib_request, | ||||
|     compat_parse_qs, | ||||
|  | ||||
|     ExtractorError, | ||||
|     unsmuggle_url, | ||||
|     unescapeHTML, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class BrightcoveIE(InfoExtractor): | ||||
|     _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)' | ||||
|     _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s' | ||||
|     _PLAYLIST_URL_TEMPLATE = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' | ||||
|  | ||||
|     _TESTS = [ | ||||
|         { | ||||
|             # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/ | ||||
|             u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001', | ||||
|             u'file': u'2371591881001.mp4', | ||||
|             u'md5': u'5423e113865d26e40624dce2e4b45d95', | ||||
|             u'note': u'Test Brightcove downloads and detection in GenericIE', | ||||
|             u'info_dict': { | ||||
|                 u'title': u'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”', | ||||
|                 u'uploader': u'8TV', | ||||
|                 u'description': u'md5:a950cc4285c43e44d763d036710cd9cd', | ||||
|             'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001', | ||||
|             'file': '2371591881001.mp4', | ||||
|             'md5': '5423e113865d26e40624dce2e4b45d95', | ||||
|             'note': 'Test Brightcove downloads and detection in GenericIE', | ||||
|             'info_dict': { | ||||
|                 'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”', | ||||
|                 'uploader': '8TV', | ||||
|                 'description': 'md5:a950cc4285c43e44d763d036710cd9cd', | ||||
|             } | ||||
|         }, | ||||
|         { | ||||
|             # From http://medianetwork.oracle.com/video/player/1785452137001 | ||||
|             u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001', | ||||
|             u'file': u'1785452137001.flv', | ||||
|             u'info_dict': { | ||||
|                 u'title': u'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges', | ||||
|                 u'description': u'John Rose speaks at the JVM Language Summit, August 1, 2012.', | ||||
|                 u'uploader': u'Oracle', | ||||
|             'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001', | ||||
|             'file': '1785452137001.flv', | ||||
|             'info_dict': { | ||||
|                 'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges', | ||||
|                 'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.', | ||||
|                 'uploader': 'Oracle', | ||||
|             }, | ||||
|         }, | ||||
|         { | ||||
|             # From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/ | ||||
|             u'url': u'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001', | ||||
|             u'info_dict': { | ||||
|                 u'id': u'2750934548001', | ||||
|                 u'ext': u'mp4', | ||||
|                 u'title': u'This Bracelet Acts as a Personal Thermostat', | ||||
|                 u'description': u'md5:547b78c64f4112766ccf4e151c20b6a0', | ||||
|                 u'uploader': u'Mashable', | ||||
|             'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001', | ||||
|             'info_dict': { | ||||
|                 'id': '2750934548001', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'This Bracelet Acts as a Personal Thermostat', | ||||
|                 'description': 'md5:547b78c64f4112766ccf4e151c20b6a0', | ||||
|                 'uploader': 'Mashable', | ||||
|             }, | ||||
|         }, | ||||
|         { | ||||
|             # test that the default referer works | ||||
|             # from http://national.ballet.ca/interact/video/Lost_in_Motion_II/ | ||||
|             u'url': u'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001', | ||||
|             u'info_dict': { | ||||
|                 u'id': u'2878862109001', | ||||
|                 u'ext': u'mp4', | ||||
|                 u'title': u'Lost in Motion II', | ||||
|                 u'description': u'md5:363109c02998fee92ec02211bd8000df', | ||||
|                 u'uploader': u'National Ballet of Canada', | ||||
|             'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001', | ||||
|             'info_dict': { | ||||
|                 'id': '2878862109001', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'Lost in Motion II', | ||||
|                 'description': 'md5:363109c02998fee92ec02211bd8000df', | ||||
|                 'uploader': 'National Ballet of Canada', | ||||
|             }, | ||||
|         }, | ||||
|         } | ||||
|     ] | ||||
|  | ||||
|     @classmethod | ||||
| @@ -80,18 +84,34 @@ class BrightcoveIE(InfoExtractor): | ||||
|         object_str = re.sub(r'(<param name="[^"]+" value="[^"]+")>', | ||||
|                             lambda m: m.group(1) + '/>', object_str) | ||||
|         # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608 | ||||
|         object_str = object_str.replace(u'<--', u'<!--') | ||||
|         object_str = object_str.replace('<--', '<!--') | ||||
|         object_str = fix_xml_ampersands(object_str) | ||||
|  | ||||
|         object_doc = xml.etree.ElementTree.fromstring(object_str) | ||||
|         assert u'BrightcoveExperience' in object_doc.attrib['class'] | ||||
|         params = {'flashID': object_doc.attrib['id'], | ||||
|                   'playerID': find_xpath_attr(object_doc, './param', 'name', 'playerID').attrib['value'], | ||||
|                   } | ||||
|  | ||||
|         fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars') | ||||
|         if fv_el is not None: | ||||
|             flashvars = dict( | ||||
|                 (k, v[0]) | ||||
|                 for k, v in compat_parse_qs(fv_el.attrib['value']).items()) | ||||
|         else: | ||||
|             flashvars = {} | ||||
|  | ||||
|         def find_param(name): | ||||
|             if name in flashvars: | ||||
|                 return flashvars[name] | ||||
|             node = find_xpath_attr(object_doc, './param', 'name', name) | ||||
|             if node is not None: | ||||
|                 return node.attrib['value'] | ||||
|             return None | ||||
|  | ||||
|         params = {} | ||||
|  | ||||
|         playerID = find_param('playerID') | ||||
|         if playerID is None: | ||||
|             raise ExtractorError('Cannot find player ID') | ||||
|         params['playerID'] = playerID | ||||
|  | ||||
|         playerKey = find_param('playerKey') | ||||
|         # Not all pages define this value | ||||
|         if playerKey is not None: | ||||
| @@ -108,18 +128,32 @@ class BrightcoveIE(InfoExtractor): | ||||
|  | ||||
|     @classmethod | ||||
|     def _extract_brightcove_url(cls, webpage): | ||||
|         """Try to extract the brightcove url from the wepbage, returns None | ||||
|         """Try to extract the brightcove url from the webpage, returns None | ||||
|         if it can't be found | ||||
|         """ | ||||
|         m_brightcove = re.search( | ||||
|             r'<object[^>]+?class=([\'"])[^>]*?BrightcoveExperience.*?\1.+?</object>', | ||||
|             webpage, re.DOTALL) | ||||
|         if m_brightcove is not None: | ||||
|             return cls._build_brighcove_url(m_brightcove.group()) | ||||
|         else: | ||||
|             return None | ||||
|         urls = cls._extract_brightcove_urls(webpage) | ||||
|         return urls[0] if urls else None | ||||
|  | ||||
|     @classmethod | ||||
|     def _extract_brightcove_urls(cls, webpage): | ||||
|         """Return a list of all Brightcove URLs from the webpage """ | ||||
|  | ||||
|         url_m = re.search(r'<meta\s+property="og:video"\s+content="(http://c.brightcove.com/[^"]+)"', webpage) | ||||
|         if url_m: | ||||
|             return [unescapeHTML(url_m.group(1))] | ||||
|  | ||||
|         matches = re.findall( | ||||
|             r'''(?sx)<object | ||||
|             (?: | ||||
|                 [^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] | | ||||
|                 [^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/ | ||||
|             ).+?</object>''', | ||||
|             webpage) | ||||
|         return [cls._build_brighcove_url(m) for m in matches] | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         url, smuggled_data = unsmuggle_url(url, {}) | ||||
|  | ||||
|         # Change the 'videoId' and others field to '@videoPlayer' | ||||
|         url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url) | ||||
|         # Change bckey (used by bcove.me urls) to playerKey | ||||
| @@ -130,9 +164,10 @@ class BrightcoveIE(InfoExtractor): | ||||
|  | ||||
|         videoPlayer = query.get('@videoPlayer') | ||||
|         if videoPlayer: | ||||
|             return self._get_video_info(videoPlayer[0], query_str, query, | ||||
|                 # We set the original url as the default 'Referer' header | ||||
|                 referer=url) | ||||
|             # We set the original url as the default 'Referer' header | ||||
|             referer = smuggled_data.get('Referer', url) | ||||
|             return self._get_video_info( | ||||
|                 videoPlayer[0], query_str, query, referer=referer) | ||||
|         else: | ||||
|             player_key = query['playerKey'] | ||||
|             return self._get_playlist_info(player_key[0]) | ||||
| @@ -151,16 +186,18 @@ class BrightcoveIE(InfoExtractor): | ||||
|         info = self._search_regex(r'var experienceJSON = ({.*?});', webpage, 'json') | ||||
|         info = json.loads(info)['data'] | ||||
|         video_info = info['programmedContent']['videoPlayer']['mediaDTO'] | ||||
|         video_info['_youtubedl_adServerURL'] = info.get('adServerURL') | ||||
|  | ||||
|         return self._extract_video_info(video_info) | ||||
|  | ||||
|     def _get_playlist_info(self, player_key): | ||||
|         playlist_info = self._download_webpage(self._PLAYLIST_URL_TEMPLATE % player_key, | ||||
|                                                player_key, u'Downloading playlist information') | ||||
|         info_url = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' % player_key | ||||
|         playlist_info = self._download_webpage( | ||||
|             info_url, player_key, 'Downloading playlist information') | ||||
|  | ||||
|         json_data = json.loads(playlist_info) | ||||
|         if 'videoList' not in json_data: | ||||
|             raise ExtractorError(u'Empty playlist') | ||||
|             raise ExtractorError('Empty playlist') | ||||
|         playlist_info = json_data['videoList'] | ||||
|         videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']] | ||||
|  | ||||
| @@ -170,7 +207,7 @@ class BrightcoveIE(InfoExtractor): | ||||
|     def _extract_video_info(self, video_info): | ||||
|         info = { | ||||
|             'id': compat_str(video_info['id']), | ||||
|             'title': video_info['displayName'], | ||||
|             'title': video_info['displayName'].strip(), | ||||
|             'description': video_info.get('shortDescription'), | ||||
|             'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'), | ||||
|             'uploader': video_info.get('publisherName'), | ||||
| @@ -188,6 +225,23 @@ class BrightcoveIE(InfoExtractor): | ||||
|             info.update({ | ||||
|                 'url': video_info['FLVFullLengthURL'], | ||||
|             }) | ||||
|         else: | ||||
|             raise ExtractorError(u'Unable to extract video url for %s' % info['id']) | ||||
|  | ||||
|         if self._downloader.params.get('include_ads', False): | ||||
|             adServerURL = video_info.get('_youtubedl_adServerURL') | ||||
|             if adServerURL: | ||||
|                 ad_info = { | ||||
|                     '_type': 'url', | ||||
|                     'url': adServerURL, | ||||
|                 } | ||||
|                 if 'url' in info: | ||||
|                     return { | ||||
|                         '_type': 'playlist', | ||||
|                         'title': info['title'], | ||||
|                         'entries': [ad_info, info], | ||||
|                     } | ||||
|                 else: | ||||
|                     return ad_info | ||||
|  | ||||
|         if 'url' not in info and not info.get('formats'): | ||||
|             raise ExtractorError('Unable to extract video url for %s' % info['id']) | ||||
|         return info | ||||
|   | ||||
| @@ -1,21 +1,21 @@ | ||||
| # coding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import determine_ext | ||||
|  | ||||
|  | ||||
| class C56IE(InfoExtractor): | ||||
|     _VALID_URL = r'https?://((www|player)\.)?56\.com/(.+?/)?(v_|(play_album.+-))(?P<textid>.+?)\.(html|swf)' | ||||
|     IE_NAME = u'56.com' | ||||
|  | ||||
|     _TEST ={ | ||||
|         u'url': u'http://www.56.com/u39/v_OTM0NDA3MTY.html', | ||||
|         u'file': u'93440716.flv', | ||||
|         u'md5': u'e59995ac63d0457783ea05f93f12a866', | ||||
|         u'info_dict': { | ||||
|             u'title': u'网事知多少 第32期:车怒', | ||||
|     IE_NAME = '56.com' | ||||
|     _TEST = { | ||||
|         'url': 'http://www.56.com/u39/v_OTM0NDA3MTY.html', | ||||
|         'file': '93440716.flv', | ||||
|         'md5': 'e59995ac63d0457783ea05f93f12a866', | ||||
|         'info_dict': { | ||||
|             'title': '网事知多少 第32期:车怒', | ||||
|         }, | ||||
|     } | ||||
|  | ||||
| @@ -23,14 +23,18 @@ class C56IE(InfoExtractor): | ||||
|         mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE) | ||||
|         text_id = mobj.group('textid') | ||||
|         info_page = self._download_webpage('http://vxml.56.com/json/%s/' % text_id, | ||||
|                                            text_id, u'Downloading video info') | ||||
|                                            text_id, 'Downloading video info') | ||||
|         info = json.loads(info_page)['info'] | ||||
|         best_format = sorted(info['rfiles'], key=lambda f: int(f['filesize']))[-1] | ||||
|         video_url = best_format['url'] | ||||
|         formats = [{ | ||||
|             'format_id': f['type'], | ||||
|             'filesize': int(f['filesize']), | ||||
|             'url': f['url'] | ||||
|         } for f in info['rfiles']] | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return {'id': info['vid'], | ||||
|                 'title': info['Subject'], | ||||
|                 'url': video_url, | ||||
|                 'ext': determine_ext(video_url), | ||||
|                 'thumbnail': info.get('bimg') or info.get('img'), | ||||
|                 } | ||||
|         return { | ||||
|             'id': info['vid'], | ||||
|             'title': info['Subject'], | ||||
|             'formats': formats, | ||||
|             'thumbnail': info.get('bimg') or info.get('img'), | ||||
|         } | ||||
|   | ||||
							
								
								
									
										48
									
								
								youtube_dl/extractor/canal13cl.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										48
									
								
								youtube_dl/extractor/canal13cl.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,48 @@ | ||||
| # coding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
| class Canal13clIE(InfoExtractor): | ||||
|     _VALID_URL = r'^http://(?:www\.)?13\.cl/(?:[^/?#]+/)*(?P<id>[^/?#]+)' | ||||
|     _TEST = { | ||||
|         'url': 'http://www.13.cl/t13/nacional/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', | ||||
|         'md5': '4cb1fa38adcad8fea88487a078831755', | ||||
|         'info_dict': { | ||||
|             'id': '1403022125', | ||||
|             'display_id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'El "círculo de hierro" de Michelle Bachelet en su regreso a La Moneda', | ||||
|             'description': '(Foto: Agencia Uno) En nueve días más, Michelle Bachelet va a asumir por segunda vez como presidenta de la República. Entre aquellos que la acompañarán hay caras que se repiten y otras que se consolidan en su entorno de colaboradores más cercanos.', | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         display_id = mobj.group('id') | ||||
|  | ||||
|         webpage = self._download_webpage(url, display_id) | ||||
|  | ||||
|         title = self._html_search_meta( | ||||
|             'twitter:title', webpage, 'title', fatal=True) | ||||
|         description = self._html_search_meta( | ||||
|             'twitter:description', webpage, 'description') | ||||
|         url = self._html_search_regex( | ||||
|             r'articuloVideo = \"(.*?)\"', webpage, 'url') | ||||
|         real_id = self._search_regex( | ||||
|             r'[^0-9]([0-9]{7,})[^0-9]', url, 'id', default=display_id) | ||||
|         thumbnail = self._html_search_regex( | ||||
|             r'articuloImagen = \"(.*?)\"', webpage, 'thumbnail') | ||||
|  | ||||
|         return { | ||||
|             'id': real_id, | ||||
|             'display_id': display_id, | ||||
|             'url': url, | ||||
|             'title': title, | ||||
|             'description': description, | ||||
|             'ext': 'mp4', | ||||
|             'thumbnail': thumbnail, | ||||
|         } | ||||
| @@ -1,4 +1,6 @@ | ||||
| # coding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| @@ -9,11 +11,12 @@ class Canalc2IE(InfoExtractor): | ||||
|     _VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?.*?idVideo=(?P<id>\d+)' | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui', | ||||
|         u'file': u'12163.mp4', | ||||
|         u'md5': u'060158428b650f896c542dfbb3d6487f', | ||||
|         u'info_dict': { | ||||
|             u'title': u'Terrasses du Numérique' | ||||
|         'url': 'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui', | ||||
|         'md5': '060158428b650f896c542dfbb3d6487f', | ||||
|         'info_dict': { | ||||
|             'id': '12163', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Terrasses du Numérique' | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -28,10 +31,11 @@ class Canalc2IE(InfoExtractor): | ||||
|         video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file_name | ||||
|  | ||||
|         title = self._html_search_regex( | ||||
|             r'class="evenement8">(.*?)</a>', webpage, u'title') | ||||
|          | ||||
|         return {'id': video_id, | ||||
|                 'ext': 'mp4', | ||||
|                 'url': video_url, | ||||
|                 'title': title, | ||||
|                 } | ||||
|             r'class="evenement8">(.*?)</a>', webpage, 'title') | ||||
|  | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'ext': 'mp4', | ||||
|             'url': video_url, | ||||
|             'title': title, | ||||
|         } | ||||
|   | ||||
							
								
								
									
										126
									
								
								youtube_dl/extractor/ceskatelevize.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										126
									
								
								youtube_dl/extractor/ceskatelevize.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,126 @@ | ||||
| # -*- coding: utf-8 -*- | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     compat_urllib_request, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlparse, | ||||
|     ExtractorError, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class CeskaTelevizeIE(InfoExtractor): | ||||
|     _VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(.+/)?(?P<id>[^?#]+)' | ||||
|  | ||||
|     _TESTS = [ | ||||
|         { | ||||
|             'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/213512120230004-spanelska-chripka', | ||||
|             'info_dict': { | ||||
|                 'id': '213512120230004', | ||||
|                 'ext': 'flv', | ||||
|                 'title': 'První republika: Španělská chřipka', | ||||
|                 'duration': 3107.4, | ||||
|             }, | ||||
|             'params': { | ||||
|                 'skip_download': True,  # requires rtmpdump | ||||
|             }, | ||||
|             'skip': 'Works only from Czech Republic.', | ||||
|         }, | ||||
|         { | ||||
|             'url': 'http://www.ceskatelevize.cz/ivysilani/1030584952-tsatsiki-maminka-a-policajt', | ||||
|             'info_dict': { | ||||
|                 'id': '20138143440', | ||||
|                 'ext': 'flv', | ||||
|                 'title': 'Tsatsiki, maminka a policajt', | ||||
|                 'duration': 6754.1, | ||||
|             }, | ||||
|             'params': { | ||||
|                 'skip_download': True,  # requires rtmpdump | ||||
|             }, | ||||
|             'skip': 'Works only from Czech Republic.', | ||||
|         }, | ||||
|         { | ||||
|             'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina', | ||||
|             'info_dict': { | ||||
|                 'id': '14716', | ||||
|                 'ext': 'flv', | ||||
|                 'title': 'První republika: Zpěvačka z Dupárny Bobina', | ||||
|                 'duration': 90, | ||||
|             }, | ||||
|             'params': { | ||||
|                 'skip_download': True,  # requires rtmpdump | ||||
|             }, | ||||
|         }, | ||||
|     ] | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         url = url.replace('/porady/', '/ivysilani/').replace('/video/', '') | ||||
|  | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         video_id = mobj.group('id') | ||||
|  | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|  | ||||
|         NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.' | ||||
|         if '%s</p>' % NOT_AVAILABLE_STRING in webpage: | ||||
|             raise ExtractorError(NOT_AVAILABLE_STRING, expected=True) | ||||
|  | ||||
|         typ = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type') | ||||
|         episode_id = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id') | ||||
|  | ||||
|         data = { | ||||
|             'playlist[0][type]': typ, | ||||
|             'playlist[0][id]': episode_id, | ||||
|             'requestUrl': compat_urllib_parse_urlparse(url).path, | ||||
|             'requestSource': 'iVysilani', | ||||
|         } | ||||
|  | ||||
|         req = compat_urllib_request.Request('http://www.ceskatelevize.cz/ivysilani/ajax/get-playlist-url', | ||||
|                                             data=compat_urllib_parse.urlencode(data)) | ||||
|  | ||||
|         req.add_header('Content-type', 'application/x-www-form-urlencoded') | ||||
|         req.add_header('x-addr', '127.0.0.1') | ||||
|         req.add_header('X-Requested-With', 'XMLHttpRequest') | ||||
|         req.add_header('Referer', url) | ||||
|  | ||||
|         playlistpage = self._download_json(req, video_id) | ||||
|  | ||||
|         req = compat_urllib_request.Request(compat_urllib_parse.unquote(playlistpage['url'])) | ||||
|         req.add_header('Referer', url) | ||||
|  | ||||
|         playlist = self._download_xml(req, video_id) | ||||
|          | ||||
|         formats = [] | ||||
|         for i in playlist.find('smilRoot/body'): | ||||
|             if 'AD' not in i.attrib['id']: | ||||
|                 base_url = i.attrib['base'] | ||||
|                 parsedurl = compat_urllib_parse_urlparse(base_url) | ||||
|                 duration = i.attrib['duration'] | ||||
|  | ||||
|                 for video in i.findall('video'): | ||||
|                     if video.attrib['label'] != 'AD': | ||||
|                         format_id = video.attrib['label'] | ||||
|                         play_path = video.attrib['src'] | ||||
|                         vbr = int(video.attrib['system-bitrate']) | ||||
|  | ||||
|                         formats.append({ | ||||
|                             'format_id': format_id, | ||||
|                             'url': base_url, | ||||
|                             'vbr': vbr, | ||||
|                             'play_path': play_path, | ||||
|                             'app': parsedurl.path[1:] + '?' + parsedurl.query, | ||||
|                             'rtmp_live': True, | ||||
|                             'ext': 'flv', | ||||
|                         }) | ||||
|  | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return { | ||||
|             'id': episode_id, | ||||
|             'title': self._html_search_regex(r'<title>(.+?) — iVysílání — Česká televize</title>', webpage, 'title'), | ||||
|             'duration': float(duration), | ||||
|             'formats': formats, | ||||
|         } | ||||
| @@ -1,4 +1,4 @@ | ||||
| # encoding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| @@ -11,38 +11,40 @@ class Channel9IE(InfoExtractor): | ||||
|  | ||||
|     The type of provided URL (video or playlist) is determined according to | ||||
|     meta Search.PageType from web page HTML rather than URL itself, as it is | ||||
|     not always possible to do.     | ||||
|     not always possible to do. | ||||
|     ''' | ||||
|     IE_DESC = u'Channel 9' | ||||
|     IE_NAME = u'channel9' | ||||
|     _VALID_URL = r'^https?://(?:www\.)?channel9\.msdn\.com/(?P<contentpath>.+)/?' | ||||
|     IE_DESC = 'Channel 9' | ||||
|     IE_NAME = 'channel9' | ||||
|     _VALID_URL = r'https?://(?:www\.)?channel9\.msdn\.com/(?P<contentpath>.+)/?' | ||||
|  | ||||
|     _TESTS = [ | ||||
|         { | ||||
|             u'url': u'http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002', | ||||
|             u'file': u'Events_TechEd_Australia_2013_KOS002.mp4', | ||||
|             u'md5': u'bbd75296ba47916b754e73c3a4bbdf10', | ||||
|             u'info_dict': { | ||||
|                 u'title': u'Developer Kick-Off Session: Stuff We Love', | ||||
|                 u'description': u'md5:c08d72240b7c87fcecafe2692f80e35f', | ||||
|                 u'duration': 4576, | ||||
|                 u'thumbnail': u'http://media.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg', | ||||
|                 u'session_code': u'KOS002', | ||||
|                 u'session_day': u'Day 1', | ||||
|                 u'session_room': u'Arena 1A', | ||||
|                 u'session_speakers': [ u'Ed Blankenship', u'Andrew Coates', u'Brady Gaster', u'Patrick Klug', u'Mads Kristensen' ], | ||||
|             'url': 'http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002', | ||||
|             'md5': 'bbd75296ba47916b754e73c3a4bbdf10', | ||||
|             'info_dict': { | ||||
|                 'id': 'Events/TechEd/Australia/2013/KOS002', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'Developer Kick-Off Session: Stuff We Love', | ||||
|                 'description': 'md5:c08d72240b7c87fcecafe2692f80e35f', | ||||
|                 'duration': 4576, | ||||
|                 'thumbnail': 'http://media.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg', | ||||
|                 'session_code': 'KOS002', | ||||
|                 'session_day': 'Day 1', | ||||
|                 'session_room': 'Arena 1A', | ||||
|                 'session_speakers': [ 'Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen' ], | ||||
|             }, | ||||
|         }, | ||||
|         { | ||||
|             u'url': u'http://channel9.msdn.com/posts/Self-service-BI-with-Power-BI-nuclear-testing', | ||||
|             u'file': u'posts_Self-service-BI-with-Power-BI-nuclear-testing.mp4', | ||||
|             u'md5': u'b43ee4529d111bc37ba7ee4f34813e68', | ||||
|             u'info_dict': { | ||||
|                 u'title': u'Self-service BI with Power BI - nuclear testing', | ||||
|                 u'description': u'md5:d1e6ecaafa7fb52a2cacdf9599829f5b', | ||||
|                 u'duration': 1540, | ||||
|                 u'thumbnail': u'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg', | ||||
|                 u'authors': [ u'Mike Wilmot' ], | ||||
|             'url': 'http://channel9.msdn.com/posts/Self-service-BI-with-Power-BI-nuclear-testing', | ||||
|             'md5': 'b43ee4529d111bc37ba7ee4f34813e68', | ||||
|             'info_dict': { | ||||
|                 'id': 'posts/Self-service-BI-with-Power-BI-nuclear-testing', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'Self-service BI with Power BI - nuclear testing', | ||||
|                 'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b', | ||||
|                 'duration': 1540, | ||||
|                 'thumbnail': 'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg', | ||||
|                 'authors': [ 'Mike Wilmot' ], | ||||
|             }, | ||||
|         } | ||||
|     ] | ||||
| @@ -60,7 +62,7 @@ class Channel9IE(InfoExtractor): | ||||
|             return 0 | ||||
|         units = m.group('units') | ||||
|         try: | ||||
|             exponent = [u'B', u'KB', u'MB', u'GB', u'TB', u'PB', u'EB', u'ZB', u'YB'].index(units.upper()) | ||||
|             exponent = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'].index(units.upper()) | ||||
|         except ValueError: | ||||
|             return 0 | ||||
|         size = float(m.group('size')) | ||||
| @@ -76,21 +78,25 @@ class Channel9IE(InfoExtractor): | ||||
|             </div>)?                                                # File size part may be missing | ||||
|         ''' | ||||
|         # Extract known formats | ||||
|         formats = [{'url': x.group('url'), | ||||
|                  'format_id': x.group('quality'), | ||||
|                  'format_note': x.group('note'), | ||||
|                  'format': '%s (%s)' % (x.group('quality'), x.group('note')),  | ||||
|                  'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate | ||||
|                  } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats] | ||||
|         # Sort according to known formats list | ||||
|         formats.sort(key=lambda fmt: self._known_formats.index(fmt['format_id'])) | ||||
|         formats = [{ | ||||
|             'url': x.group('url'), | ||||
|             'format_id': x.group('quality'), | ||||
|             'format_note': x.group('note'), | ||||
|             'format': '%s (%s)' % (x.group('quality'), x.group('note')), | ||||
|             'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate | ||||
|             'preference': self._known_formats.index(x.group('quality')), | ||||
|             'vcodec': 'none' if x.group('note') == 'Audio only' else None, | ||||
|         } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats] | ||||
|  | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return formats | ||||
|  | ||||
|     def _extract_title(self, html): | ||||
|         title = self._html_search_meta(u'title', html, u'title') | ||||
|         title = self._html_search_meta('title', html, 'title') | ||||
|         if title is None:            | ||||
|             title = self._og_search_title(html) | ||||
|             TITLE_SUFFIX = u' (Channel 9)' | ||||
|             TITLE_SUFFIX = ' (Channel 9)' | ||||
|             if title is not None and title.endswith(TITLE_SUFFIX): | ||||
|                 title = title[:-len(TITLE_SUFFIX)] | ||||
|         return title | ||||
| @@ -106,7 +112,7 @@ class Channel9IE(InfoExtractor): | ||||
|         m = re.search(DESCRIPTION_REGEX, html) | ||||
|         if m is not None: | ||||
|             return m.group('description') | ||||
|         return self._html_search_meta(u'description', html, u'description') | ||||
|         return self._html_search_meta('description', html, 'description') | ||||
|  | ||||
|     def _extract_duration(self, html): | ||||
|         m = re.search(r'data-video_duration="(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html) | ||||
| @@ -168,7 +174,7 @@ class Channel9IE(InfoExtractor): | ||||
|  | ||||
|         # Nothing to download | ||||
|         if len(formats) == 0 and slides is None and zip_ is None: | ||||
|             self._downloader.report_warning(u'None of recording, slides or zip are available for %s' % content_path) | ||||
|             self._downloader.report_warning('None of recording, slides or zip are available for %s' % content_path) | ||||
|             return | ||||
|  | ||||
|         # Extract meta | ||||
| @@ -240,7 +246,7 @@ class Channel9IE(InfoExtractor): | ||||
|         return contents | ||||
|  | ||||
|     def _extract_list(self, content_path): | ||||
|         rss = self._download_xml(self._RSS_URL % content_path, content_path, u'Downloading RSS') | ||||
|         rss = self._download_xml(self._RSS_URL % content_path, content_path, 'Downloading RSS') | ||||
|         entries = [self.url_result(session_url.text, 'Channel9') | ||||
|                    for session_url in rss.findall('./channel/item/link')] | ||||
|         title_text = rss.find('./channel/title').text | ||||
| @@ -250,11 +256,11 @@ class Channel9IE(InfoExtractor): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         content_path = mobj.group('contentpath') | ||||
|  | ||||
|         webpage = self._download_webpage(url, content_path, u'Downloading web page') | ||||
|         webpage = self._download_webpage(url, content_path, 'Downloading web page') | ||||
|  | ||||
|         page_type_m = re.search(r'<meta name="Search.PageType" content="(?P<pagetype>[^"]+)"/>', webpage) | ||||
|         if page_type_m is None: | ||||
|             raise ExtractorError(u'Search.PageType not found, don\'t know how to process this page', expected=True) | ||||
|             raise ExtractorError('Search.PageType not found, don\'t know how to process this page', expected=True) | ||||
|  | ||||
|         page_type = page_type_m.group('pagetype') | ||||
|         if page_type == 'List':         # List page, may contain list of 'item'-like objects | ||||
| @@ -264,4 +270,4 @@ class Channel9IE(InfoExtractor): | ||||
|         elif page_type == 'Session':    # Event session page, may contain downloadable content | ||||
|             return self._extract_session(webpage, content_path) | ||||
|         else: | ||||
|             raise ExtractorError(u'Unexpected Search.PageType %s' % page_type, expected=True) | ||||
|             raise ExtractorError('Unexpected Search.PageType %s' % page_type, expected=True) | ||||
							
								
								
									
										97
									
								
								youtube_dl/extractor/chilloutzone.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										97
									
								
								youtube_dl/extractor/chilloutzone.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,97 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import base64 | ||||
| import json | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     clean_html, | ||||
|     ExtractorError | ||||
| ) | ||||
|  | ||||
|  | ||||
| class ChilloutzoneIE(InfoExtractor): | ||||
|     _VALID_URL = r'https?://(?:www\.)?chilloutzone\.net/video/(?P<id>[\w|-]+)\.html' | ||||
|     _TESTS = [{ | ||||
|         'url': 'http://www.chilloutzone.net/video/enemene-meck-alle-katzen-weg.html', | ||||
|         'md5': 'a76f3457e813ea0037e5244f509e66d1', | ||||
|         'info_dict': { | ||||
|             'id': 'enemene-meck-alle-katzen-weg', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Enemene Meck - Alle Katzen weg', | ||||
|             'description': 'Ist das der Umkehrschluss des Niesenden Panda-Babys?', | ||||
|         }, | ||||
|     }, { | ||||
|         'note': 'Video hosted at YouTube', | ||||
|         'url': 'http://www.chilloutzone.net/video/eine-sekunde-bevor.html', | ||||
|         'info_dict': { | ||||
|             'id': '1YVQaAgHyRU', | ||||
|             'ext': 'mp4', | ||||
|             'title': '16 Photos Taken 1 Second Before Disaster', | ||||
|             'description': 'md5:58a8fcf6a459fe0a08f54140f0ad1814', | ||||
|             'uploader': 'BuzzFeedVideo', | ||||
|             'uploader_id': 'BuzzFeedVideo', | ||||
|             'upload_date': '20131105', | ||||
|         }, | ||||
|     }, { | ||||
|         'note': 'Video hosted at Vimeo', | ||||
|         'url': 'http://www.chilloutzone.net/video/icon-blending.html', | ||||
|         'md5': '2645c678b8dc4fefcc0e1b60db18dac1', | ||||
|         'info_dict': { | ||||
|             'id': '85523671', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'The Sunday Times - Icons', | ||||
|             'description': 'md5:3e1c0dc6047498d6728dcdaad0891762', | ||||
|             'uploader': 'Us', | ||||
|             'uploader_id': 'usfilms', | ||||
|             'upload_date': '20140131' | ||||
|         }, | ||||
|     }] | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         video_id = mobj.group('id') | ||||
|  | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|  | ||||
|         base64_video_info = self._html_search_regex( | ||||
|             r'var cozVidData = "(.+?)";', webpage, 'video data') | ||||
|         decoded_video_info = base64.b64decode(base64_video_info).decode("utf-8") | ||||
|         video_info_dict = json.loads(decoded_video_info) | ||||
|  | ||||
|         # get video information from dict | ||||
|         video_url = video_info_dict['mediaUrl'] | ||||
|         description = clean_html(video_info_dict.get('description')) | ||||
|         title = video_info_dict['title'] | ||||
|         native_platform = video_info_dict['nativePlatform'] | ||||
|         native_video_id = video_info_dict['nativeVideoId'] | ||||
|         source_priority = video_info_dict['sourcePriority'] | ||||
|  | ||||
|         # If nativePlatform is None a fallback mechanism is used (i.e. youtube embed) | ||||
|         if native_platform is None: | ||||
|             youtube_url = self._html_search_regex( | ||||
|                 r'<iframe.* src="((?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"', | ||||
|                 webpage, 'fallback video URL', default=None) | ||||
|             if youtube_url is not None: | ||||
|                 return self.url_result(youtube_url, ie='Youtube') | ||||
|  | ||||
|         # Non Fallback: Decide to use native source (e.g. youtube or vimeo) or | ||||
|         # the own CDN | ||||
|         if source_priority == 'native': | ||||
|             if native_platform == 'youtube': | ||||
|                 return self.url_result(native_video_id, ie='Youtube') | ||||
|             if native_platform == 'vimeo': | ||||
|                 return self.url_result( | ||||
|                     'http://vimeo.com/' + native_video_id, ie='Vimeo') | ||||
|  | ||||
|         if not video_url: | ||||
|             raise ExtractorError('No video found') | ||||
|  | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'url': video_url, | ||||
|             'ext': 'mp4', | ||||
|             'title': title, | ||||
|             'description': description, | ||||
|         } | ||||
| @@ -1,4 +1,5 @@ | ||||
| # encoding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| @@ -8,73 +9,63 @@ from ..utils import ( | ||||
|  | ||||
|  | ||||
| class CinemassacreIE(InfoExtractor): | ||||
|     _VALID_URL = r'(?:http://)?(?:www\.)?(?P<url>cinemassacre\.com/(?P<date_Y>[0-9]{4})/(?P<date_m>[0-9]{2})/(?P<date_d>[0-9]{2})/.+?)(?:[/?].*)?' | ||||
|     _TESTS = [{ | ||||
|         u'url': u'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/', | ||||
|         u'file': u'19911.flv', | ||||
|         u'info_dict': { | ||||
|             u'upload_date': u'20121110', | ||||
|             u'title': u'“Angry Video Game Nerd: The Movie” – Trailer', | ||||
|             u'description': u'md5:fb87405fcb42a331742a0dce2708560b', | ||||
|     _VALID_URL = r'http://(?:www\.)?cinemassacre\.com/(?P<date_Y>[0-9]{4})/(?P<date_m>[0-9]{2})/(?P<date_d>[0-9]{2})/.+?' | ||||
|     _TESTS = [ | ||||
|         { | ||||
|             'url': 'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/', | ||||
|             'file': '19911.mp4', | ||||
|             'md5': 'fde81fbafaee331785f58cd6c0d46190', | ||||
|             'info_dict': { | ||||
|                 'upload_date': '20121110', | ||||
|                 'title': '“Angry Video Game Nerd: The Movie” – Trailer', | ||||
|                 'description': 'md5:fb87405fcb42a331742a0dce2708560b', | ||||
|             }, | ||||
|         }, | ||||
|         u'params': { | ||||
|             # rtmp download | ||||
|             u'skip_download': True, | ||||
|         }, | ||||
|     }, | ||||
|     { | ||||
|         u'url': u'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940', | ||||
|         u'file': u'521be8ef82b16.flv', | ||||
|         u'info_dict': { | ||||
|             u'upload_date': u'20131002', | ||||
|             u'title': u'The Mummy’s Hand (1940)', | ||||
|         }, | ||||
|         u'params': { | ||||
|             # rtmp download | ||||
|             u'skip_download': True, | ||||
|         }, | ||||
|     }] | ||||
|         { | ||||
|             'url': 'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940', | ||||
|             'file': '521be8ef82b16.mp4', | ||||
|             'md5': 'd72f10cd39eac4215048f62ab477a511', | ||||
|             'info_dict': { | ||||
|                 'upload_date': '20131002', | ||||
|                 'title': 'The Mummy’s Hand (1940)', | ||||
|             }, | ||||
|         } | ||||
|     ] | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|  | ||||
|         webpage_url = u'http://' + mobj.group('url') | ||||
|         webpage = self._download_webpage(webpage_url, None) # Don't know video id yet | ||||
|         webpage = self._download_webpage(url, None)  # Don't know video id yet | ||||
|         video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d') | ||||
|         mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?id=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage) | ||||
|         if not mobj: | ||||
|             raise ExtractorError(u'Can\'t extract embed url and video id') | ||||
|         playerdata_url = mobj.group(u'embed_url') | ||||
|         video_id = mobj.group(u'video_id') | ||||
|             raise ExtractorError('Can\'t extract embed url and video id') | ||||
|         playerdata_url = mobj.group('embed_url') | ||||
|         video_id = mobj.group('video_id') | ||||
|  | ||||
|         video_title = self._html_search_regex(r'<title>(?P<title>.+?)\|', | ||||
|             webpage, u'title') | ||||
|             webpage, 'title') | ||||
|         video_description = self._html_search_regex(r'<div class="entry-content">(?P<description>.+?)</div>', | ||||
|             webpage, u'description', flags=re.DOTALL, fatal=False) | ||||
|             webpage, 'description', flags=re.DOTALL, fatal=False) | ||||
|         if len(video_description) == 0: | ||||
|             video_description = None | ||||
|  | ||||
|         playerdata = self._download_webpage(playerdata_url, video_id) | ||||
|         url = self._html_search_regex(r'\'streamer\': \'(?P<url>[^\']+)\'', playerdata, u'url') | ||||
|  | ||||
|         sd_file = self._html_search_regex(r'\'file\': \'(?P<sd_file>[^\']+)\'', playerdata, u'sd_file') | ||||
|         hd_file = self._html_search_regex(r'\'?file\'?: "(?P<hd_file>[^"]+)"', playerdata, u'hd_file') | ||||
|         video_thumbnail = self._html_search_regex(r'\'image\': \'(?P<thumbnail>[^\']+)\'', playerdata, u'thumbnail', fatal=False) | ||||
|         sd_url = self._html_search_regex(r'file: \'(?P<sd_file>[^\']+)\', label: \'SD\'', playerdata, 'sd_file') | ||||
|         hd_url = self._html_search_regex(r'file: \'(?P<hd_file>[^\']+)\', label: \'HD\'', playerdata, 'hd_file') | ||||
|         video_thumbnail = self._html_search_regex(r'image: \'(?P<thumbnail>[^\']+)\'', playerdata, 'thumbnail', fatal=False) | ||||
|  | ||||
|         formats = [ | ||||
|             { | ||||
|                 'url': url, | ||||
|                 'play_path': 'mp4:' + sd_file, | ||||
|                 'rtmp_live': True, # workaround | ||||
|                 'ext': 'flv', | ||||
|                 'url': sd_url, | ||||
|                 'ext': 'mp4', | ||||
|                 'format': 'sd', | ||||
|                 'format_id': 'sd', | ||||
|             }, | ||||
|             { | ||||
|                 'url': url, | ||||
|                 'play_path': 'mp4:' + hd_file, | ||||
|                 'rtmp_live': True, # workaround | ||||
|                 'ext': 'flv', | ||||
|                 'url': hd_url, | ||||
|                 'ext': 'mp4', | ||||
|                 'format': 'hd', | ||||
|                 'format_id': 'hd', | ||||
|             }, | ||||
|   | ||||
							
								
								
									
										56
									
								
								youtube_dl/extractor/cliphunter.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										56
									
								
								youtube_dl/extractor/cliphunter.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,56 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
| translation_table = { | ||||
|     'a': 'h', 'd': 'e', 'e': 'v', 'f': 'o', 'g': 'f', 'i': 'd', 'l': 'n', | ||||
|     'm': 'a', 'n': 'm', 'p': 'u', 'q': 't', 'r': 's', 'v': 'p', 'x': 'r', | ||||
|     'y': 'l', 'z': 'i', | ||||
|     '$': ':', '&': '.', '(': '=', '^': '&', '=': '/', | ||||
| } | ||||
|  | ||||
|  | ||||
| class CliphunterIE(InfoExtractor): | ||||
|     IE_NAME = 'cliphunter' | ||||
|  | ||||
|     _VALID_URL = r'''(?x)http://(?:www\.)?cliphunter\.com/w/ | ||||
|         (?P<id>[0-9]+)/ | ||||
|         (?P<seo>.+?)(?:$|[#\?]) | ||||
|     ''' | ||||
|     _TEST = { | ||||
|         'url': 'http://www.cliphunter.com/w/1012420/Fun_Jynx_Maze_solo', | ||||
|         'file': '1012420.flv', | ||||
|         'md5': '15e7740f30428abf70f4223478dc1225', | ||||
|         'info_dict': { | ||||
|             'title': 'Fun Jynx Maze solo', | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         video_id = mobj.group('id') | ||||
|  | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|  | ||||
|         pl_fiji = self._search_regex( | ||||
|             r'pl_fiji = \'([^\']+)\'', webpage, 'video data') | ||||
|         pl_c_qual = self._search_regex( | ||||
|             r'pl_c_qual = "(.)"', webpage, 'video quality') | ||||
|         video_title = self._search_regex( | ||||
|             r'mediaTitle = "([^"]+)"', webpage, 'title') | ||||
|  | ||||
|         video_url = ''.join(translation_table.get(c, c) for c in pl_fiji) | ||||
|  | ||||
|         formats = [{ | ||||
|             'url': video_url, | ||||
|             'format_id': pl_c_qual, | ||||
|         }] | ||||
|  | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'title': video_title, | ||||
|             'formats': formats, | ||||
|         } | ||||
| @@ -3,7 +3,7 @@ import re | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     find_xpath_attr, | ||||
|     fix_xml_all_ampersand, | ||||
|     fix_xml_ampersands | ||||
| ) | ||||
|  | ||||
|  | ||||
| @@ -33,7 +33,7 @@ class ClipsyndicateIE(InfoExtractor): | ||||
|         pdoc = self._download_xml( | ||||
|             'http://eplayer.clipsyndicate.com/osmf/playlist?%s' % flvars, | ||||
|             video_id, u'Downloading video info', | ||||
|             transform_source=fix_xml_all_ampersand)  | ||||
|             transform_source=fix_xml_ampersands) | ||||
|  | ||||
|         track_doc = pdoc.find('trackList/track') | ||||
|         def find_param(name): | ||||
|   | ||||
							
								
								
									
										19
									
								
								youtube_dl/extractor/cmt.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										19
									
								
								youtube_dl/extractor/cmt.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,19 @@ | ||||
| from .mtv import MTVIE | ||||
|  | ||||
| class CMTIE(MTVIE): | ||||
|     IE_NAME = u'cmt.com' | ||||
|     _VALID_URL = r'https?://www\.cmt\.com/videos/.+?/(?P<videoid>[^/]+)\.jhtml' | ||||
|     _FEED_URL = 'http://www.cmt.com/sitewide/apps/player/embed/rss/' | ||||
|  | ||||
|     _TESTS = [ | ||||
|         { | ||||
|             u'url': u'http://www.cmt.com/videos/garth-brooks/989124/the-call-featuring-trisha-yearwood.jhtml#artist=30061', | ||||
|             u'md5': u'e6b7ef3c4c45bbfae88061799bbba6c2', | ||||
|             u'info_dict': { | ||||
|                 u'id': u'989124', | ||||
|                 u'ext': u'mp4', | ||||
|                 u'title': u'Garth Brooks - "The Call (featuring Trisha Yearwood)"', | ||||
|                 u'description': u'Blame It All On My Roots', | ||||
|             }, | ||||
|         }, | ||||
|     ] | ||||
| @@ -1,7 +1,13 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import determine_ext | ||||
| from ..utils import ( | ||||
|     int_or_none, | ||||
|     parse_duration, | ||||
|     url_basename, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class CNNIE(InfoExtractor): | ||||
| @@ -9,21 +15,24 @@ class CNNIE(InfoExtractor): | ||||
|         (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))''' | ||||
|  | ||||
|     _TESTS = [{ | ||||
|         u'url': u'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn', | ||||
|         u'file': u'sports_2013_06_09_nadal-1-on-1.cnn.mp4', | ||||
|         u'md5': u'3e6121ea48df7e2259fe73a0628605c4', | ||||
|         u'info_dict': { | ||||
|             u'title': u'Nadal wins 8th French Open title', | ||||
|             u'description': u'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.', | ||||
|         'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn', | ||||
|         'file': 'sports_2013_06_09_nadal-1-on-1.cnn.mp4', | ||||
|         'md5': '3e6121ea48df7e2259fe73a0628605c4', | ||||
|         'info_dict': { | ||||
|             'title': 'Nadal wins 8th French Open title', | ||||
|             'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.', | ||||
|             'duration': 135, | ||||
|             'upload_date': '20130609', | ||||
|         }, | ||||
|     }, | ||||
|     { | ||||
|         u"url": u"http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29", | ||||
|         u"file": u"us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4", | ||||
|         u"md5": u"b5cc60c60a3477d185af8f19a2a26f4e", | ||||
|         u"info_dict": { | ||||
|             u"title": "Student's epic speech stuns new freshmen", | ||||
|             u"description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"" | ||||
|         "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29", | ||||
|         "file": "us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4", | ||||
|         "md5": "b5cc60c60a3477d185af8f19a2a26f4e", | ||||
|         "info_dict": { | ||||
|             "title": "Student's epic speech stuns new freshmen", | ||||
|             "description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"", | ||||
|             "upload_date": "20130821", | ||||
|         } | ||||
|     }] | ||||
|  | ||||
| @@ -31,26 +40,87 @@ class CNNIE(InfoExtractor): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         path = mobj.group('path') | ||||
|         page_title = mobj.group('title') | ||||
|         info_url = u'http://cnn.com/video/data/3.0/%s/index.xml' % path | ||||
|         info_url = 'http://cnn.com/video/data/3.0/%s/index.xml' % path | ||||
|         info = self._download_xml(info_url, page_title) | ||||
|  | ||||
|         formats = [] | ||||
|         rex = re.compile(r'''(?x) | ||||
|             (?P<width>[0-9]+)x(?P<height>[0-9]+) | ||||
|             (?:_(?P<bitrate>[0-9]+)k)? | ||||
|         ''') | ||||
|         for f in info.findall('files/file'): | ||||
|             mf = re.match(r'(\d+)x(\d+)(?:_(.*)k)?',f.attrib['bitrate']) | ||||
|             if mf is not None: | ||||
|                 formats.append((int(mf.group(1)), int(mf.group(2)), int(mf.group(3) or 0), f.text)) | ||||
|         formats = sorted(formats) | ||||
|         (_,_,_, video_path) = formats[-1] | ||||
|         video_url = 'http://ht.cdn.turner.com/cnn/big%s' % video_path | ||||
|             video_url = 'http://ht.cdn.turner.com/cnn/big%s' % (f.text.strip()) | ||||
|             fdct = { | ||||
|                 'format_id': f.attrib['bitrate'], | ||||
|                 'url': video_url, | ||||
|             } | ||||
|  | ||||
|             mf = rex.match(f.attrib['bitrate']) | ||||
|             if mf: | ||||
|                 fdct['width'] = int(mf.group('width')) | ||||
|                 fdct['height'] = int(mf.group('height')) | ||||
|                 fdct['tbr'] = int_or_none(mf.group('bitrate')) | ||||
|             else: | ||||
|                 mf = rex.search(f.text) | ||||
|                 if mf: | ||||
|                     fdct['width'] = int(mf.group('width')) | ||||
|                     fdct['height'] = int(mf.group('height')) | ||||
|                     fdct['tbr'] = int_or_none(mf.group('bitrate')) | ||||
|                 else: | ||||
|                     mi = re.match(r'ios_(audio|[0-9]+)$', f.attrib['bitrate']) | ||||
|                     if mi: | ||||
|                         if mi.group(1) == 'audio': | ||||
|                             fdct['vcodec'] = 'none' | ||||
|                             fdct['ext'] = 'm4a' | ||||
|                         else: | ||||
|                             fdct['tbr'] = int(mi.group(1)) | ||||
|  | ||||
|             formats.append(fdct) | ||||
|  | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         thumbnails = sorted([((int(t.attrib['height']),int(t.attrib['width'])), t.text) for t in info.findall('images/image')]) | ||||
|         thumbs_dict = [{'resolution': res, 'url': t_url} for (res, t_url) in thumbnails] | ||||
|  | ||||
|         return {'id': info.attrib['id'], | ||||
|                 'title': info.find('headline').text, | ||||
|                 'url': video_url, | ||||
|                 'ext': determine_ext(video_url), | ||||
|                 'thumbnail': thumbnails[-1][1], | ||||
|                 'thumbnails': thumbs_dict, | ||||
|                 'description': info.find('description').text, | ||||
|                 } | ||||
|         metas_el = info.find('metas') | ||||
|         upload_date = ( | ||||
|             metas_el.attrib.get('version') if metas_el is not None else None) | ||||
|  | ||||
|         duration_el = info.find('length') | ||||
|         duration = parse_duration(duration_el.text) | ||||
|  | ||||
|         return { | ||||
|             'id': info.attrib['id'], | ||||
|             'title': info.find('headline').text, | ||||
|             'formats': formats, | ||||
|             'thumbnail': thumbnails[-1][1], | ||||
|             'thumbnails': thumbs_dict, | ||||
|             'description': info.find('description').text, | ||||
|             'duration': duration, | ||||
|             'upload_date': upload_date, | ||||
|         } | ||||
|  | ||||
|  | ||||
| class CNNBlogsIE(InfoExtractor): | ||||
|     _VALID_URL = r'https?://[^\.]+\.blogs\.cnn\.com/.+' | ||||
|     _TEST = { | ||||
|         'url': 'http://reliablesources.blogs.cnn.com/2014/02/09/criminalizing-journalism/', | ||||
|         'md5': '3e56f97b0b6ffb4b79f4ea0749551084', | ||||
|         'info_dict': { | ||||
|             'id': 'bestoftv/2014/02/09/criminalizing-journalism.cnn', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Criminalizing journalism?', | ||||
|             'description': 'Glenn Greenwald responds to comments made this week on Capitol Hill that journalists could be criminal accessories.', | ||||
|             'upload_date': '20140209', | ||||
|         }, | ||||
|         'add_ie': ['CNN'], | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         webpage = self._download_webpage(url, url_basename(url)) | ||||
|         cnn_url = self._html_search_regex(r'data-url="(.+?)"', webpage, 'cnn url') | ||||
|         return { | ||||
|             '_type': 'url', | ||||
|             'url': cnn_url, | ||||
|             'ie_key': CNNIE.ie_key(), | ||||
|         } | ||||
|   | ||||
| @@ -1,82 +1,102 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import json | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     compat_urllib_parse_urlparse, | ||||
|     determine_ext, | ||||
|  | ||||
|     ExtractorError, | ||||
| ) | ||||
| from ..utils import int_or_none | ||||
|  | ||||
|  | ||||
| class CollegeHumorIE(InfoExtractor): | ||||
|     _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$' | ||||
|  | ||||
|     _TESTS = [{ | ||||
|         u'url': u'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe', | ||||
|         u'file': u'6902724.mp4', | ||||
|         u'md5': u'1264c12ad95dca142a9f0bf7968105a0', | ||||
|         u'info_dict': { | ||||
|             u'title': u'Comic-Con Cosplay Catastrophe', | ||||
|             u'description': u'Fans get creative this year at San Diego.  Too creative.  And yes, that\'s really Joss Whedon.', | ||||
|         'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe', | ||||
|         'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd', | ||||
|         'info_dict': { | ||||
|             'id': '6902724', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Comic-Con Cosplay Catastrophe', | ||||
|             'description': "Fans get creative this year at San Diego.  Too creative.  And yes, that's really Joss Whedon.", | ||||
|             'age_limit': 13, | ||||
|             'duration': 187, | ||||
|         }, | ||||
|     }, | ||||
|     { | ||||
|         u'url': u'http://www.collegehumor.com/video/3505939/font-conference', | ||||
|         u'file': u'3505939.mp4', | ||||
|         u'md5': u'c51ca16b82bb456a4397987791a835f5', | ||||
|         u'info_dict': { | ||||
|             u'title': u'Font Conference', | ||||
|             u'description': u'This video wasn\'t long enough, so we made it double-spaced.', | ||||
|         'url': 'http://www.collegehumor.com/video/3505939/font-conference', | ||||
|         'md5': '72fa701d8ef38664a4dbb9e2ab721816', | ||||
|         'info_dict': { | ||||
|             'id': '3505939', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Font Conference', | ||||
|             'description': "This video wasn't long enough, so we made it double-spaced.", | ||||
|             'age_limit': 10, | ||||
|             'duration': 179, | ||||
|         }, | ||||
|     }] | ||||
|     }, | ||||
|     # embedded youtube video | ||||
|     { | ||||
|         'url': 'http://www.collegehumor.com/embed/6950306', | ||||
|         'info_dict': { | ||||
|             'id': 'Z-bao9fg6Yc', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!', | ||||
|             'uploader': 'Mark Dice', | ||||
|             'uploader_id': 'MarkDice', | ||||
|             'description': 'md5:62c3dab9351fac7bb44b53b69511d87f', | ||||
|             'upload_date': '20140127', | ||||
|         }, | ||||
|         'params': { | ||||
|             'skip_download': True, | ||||
|         }, | ||||
|         'add_ie': ['Youtube'], | ||||
|     }, | ||||
|     ] | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         if mobj is None: | ||||
|             raise ExtractorError(u'Invalid URL: %s' % url) | ||||
|         video_id = mobj.group('videoid') | ||||
|  | ||||
|         info = { | ||||
|             'id': video_id, | ||||
|             'uploader': None, | ||||
|             'upload_date': None, | ||||
|         } | ||||
|         jsonUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id + '.json' | ||||
|         data = json.loads(self._download_webpage( | ||||
|             jsonUrl, video_id, 'Downloading info JSON')) | ||||
|         vdata = data['video'] | ||||
|         if vdata.get('youtubeId') is not None: | ||||
|             return { | ||||
|                 '_type': 'url', | ||||
|                 'url': vdata['youtubeId'], | ||||
|                 'ie_key': 'Youtube', | ||||
|             } | ||||
|  | ||||
|         self.report_extraction(video_id) | ||||
|         xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id | ||||
|         mdoc = self._download_xml(xmlUrl, video_id, | ||||
|                                          u'Downloading info XML', | ||||
|                                          u'Unable to download video info XML') | ||||
|  | ||||
|         try: | ||||
|             videoNode = mdoc.findall('./video')[0] | ||||
|             youtubeIdNode = videoNode.find('./youtubeID') | ||||
|             if youtubeIdNode is not None: | ||||
|                 return self.url_result(youtubeIdNode.text, 'Youtube') | ||||
|             info['description'] = videoNode.findall('./description')[0].text | ||||
|             info['title'] = videoNode.findall('./caption')[0].text | ||||
|             info['thumbnail'] = videoNode.findall('./thumbnail')[0].text | ||||
|             next_url = videoNode.findall('./file')[0].text | ||||
|         except IndexError: | ||||
|             raise ExtractorError(u'Invalid metadata XML file') | ||||
|  | ||||
|         if next_url.endswith(u'manifest.f4m'): | ||||
|             manifest_url = next_url + '?hdcore=2.10.3' | ||||
|             adoc = self._download_xml(manifest_url, video_id, | ||||
|                                          u'Downloading XML manifest', | ||||
|                                          u'Unable to download video info XML') | ||||
|  | ||||
|             try: | ||||
|                 video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text | ||||
|             except IndexError: | ||||
|                 raise ExtractorError(u'Invalid manifest file') | ||||
|             url_pr = compat_urllib_parse_urlparse(info['thumbnail']) | ||||
|             info['url'] = url_pr.scheme + '://' + url_pr.netloc + video_id[:-2].replace('.csmil','').replace(',','') | ||||
|             info['ext'] = 'mp4' | ||||
|         AGE_LIMITS = {'nc17': 18, 'r': 18, 'pg13': 13, 'pg': 10, 'g': 0} | ||||
|         rating = vdata.get('rating') | ||||
|         if rating: | ||||
|             age_limit = AGE_LIMITS.get(rating.lower()) | ||||
|         else: | ||||
|             # Old-style direct links | ||||
|             info['url'] = next_url | ||||
|             info['ext'] = determine_ext(info['url']) | ||||
|             age_limit = None  # None = No idea | ||||
|  | ||||
|         return info | ||||
|         PREFS = {'high_quality': 2, 'low_quality': 0} | ||||
|         formats = [] | ||||
|         for format_key in ('mp4', 'webm'): | ||||
|             for qname, qurl in vdata.get(format_key, {}).items(): | ||||
|                 formats.append({ | ||||
|                     'format_id': format_key + '_' + qname, | ||||
|                     'url': qurl, | ||||
|                     'format': format_key, | ||||
|                     'preference': PREFS.get(qname), | ||||
|                 }) | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         duration = int_or_none(vdata.get('duration'), 1000) | ||||
|         like_count = int_or_none(vdata.get('likes')) | ||||
|  | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'title': vdata['title'], | ||||
|             'description': vdata.get('description'), | ||||
|             'thumbnail': vdata.get('thumbnail'), | ||||
|             'formats': formats, | ||||
|             'age_limit': age_limit, | ||||
|             'duration': duration, | ||||
|             'like_count': like_count, | ||||
|         } | ||||
|   | ||||
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| @@ -12,31 +14,25 @@ from ..utils import ( | ||||
|  | ||||
|  | ||||
| class ComedyCentralIE(MTVServicesInfoExtractor): | ||||
|     _VALID_URL = r'https?://(?:www.)?comedycentral.com/(video-clips|episodes|cc-studios)/(?P<title>.*)' | ||||
|     _FEED_URL = u'http://comedycentral.com/feeds/mrss/' | ||||
|     _VALID_URL = r'''(?x)https?://(?:www\.)?(comedycentral|cc)\.com/ | ||||
|         (video-clips|episodes|cc-studios|video-collections) | ||||
|         /(?P<title>.*)''' | ||||
|     _FEED_URL = 'http://comedycentral.com/feeds/mrss/' | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.comedycentral.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother', | ||||
|         u'md5': u'4167875aae411f903b751a21f357f1ee', | ||||
|         u'info_dict': { | ||||
|             u'id': u'cef0cbb3-e776-4bc9-b62e-8016deccb354', | ||||
|             u'ext': u'mp4', | ||||
|             u'title': u'Uncensored - Greg Fitzsimmons - Too Good of a Mother', | ||||
|             u'description': u'After a certain point, breastfeeding becomes c**kblocking.', | ||||
|         'url': 'http://www.comedycentral.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother', | ||||
|         'md5': '4167875aae411f903b751a21f357f1ee', | ||||
|         'info_dict': { | ||||
|             'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother', | ||||
|             'description': 'After a certain point, breastfeeding becomes c**kblocking.', | ||||
|         }, | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         title = mobj.group('title') | ||||
|         webpage = self._download_webpage(url, title) | ||||
|         mgid = self._search_regex(r'data-mgid="(?P<mgid>mgid:.*?)"', | ||||
|                                   webpage, u'mgid') | ||||
|         return self._get_videos_info(mgid) | ||||
|  | ||||
|  | ||||
| class ComedyCentralShowsIE(InfoExtractor): | ||||
|     IE_DESC = u'The Daily Show / Colbert Report' | ||||
|     IE_DESC = 'The Daily Show / Colbert Report' | ||||
|     # urls can be abbreviations like :thedailyshow or :colbert | ||||
|     # urls for episodes like: | ||||
|     # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day | ||||
| @@ -53,14 +49,14 @@ class ComedyCentralShowsIE(InfoExtractor): | ||||
|                               extended-interviews/(?P<interID>[0-9]+)/playlist_tds_extended_(?P<interview_title>.*?)/.*?))) | ||||
|                      $""" | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.thedailyshow.com/watch/thu-december-13-2012/kristen-stewart', | ||||
|         u'file': u'422212.mp4', | ||||
|         u'md5': u'4e2f5cb088a83cd8cdb7756132f9739d', | ||||
|         u'info_dict': { | ||||
|             u"upload_date": u"20121214",  | ||||
|             u"description": u"Kristen Stewart",  | ||||
|             u"uploader": u"thedailyshow",  | ||||
|             u"title": u"thedailyshow-kristen-stewart part 1" | ||||
|         'url': 'http://www.thedailyshow.com/watch/thu-december-13-2012/kristen-stewart', | ||||
|         'file': '422212.mp4', | ||||
|         'md5': '4e2f5cb088a83cd8cdb7756132f9739d', | ||||
|         'info_dict': { | ||||
|             "upload_date": "20121214", | ||||
|             "description": "Kristen Stewart", | ||||
|             "uploader": "thedailyshow", | ||||
|             "title": "thedailyshow-kristen-stewart part 1" | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -90,22 +86,22 @@ class ComedyCentralShowsIE(InfoExtractor): | ||||
|  | ||||
|     @staticmethod | ||||
|     def _transform_rtmp_url(rtmp_video_url): | ||||
|         m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp.comedystor/.*)$', rtmp_video_url) | ||||
|         m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url) | ||||
|         if not m: | ||||
|             raise ExtractorError(u'Cannot transform RTMP url') | ||||
|             raise ExtractorError('Cannot transform RTMP url') | ||||
|         base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/' | ||||
|         return base + m.group('finalid') | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url, re.VERBOSE) | ||||
|         if mobj is None: | ||||
|             raise ExtractorError(u'Invalid URL: %s' % url) | ||||
|             raise ExtractorError('Invalid URL: %s' % url) | ||||
|  | ||||
|         if mobj.group('shortname'): | ||||
|             if mobj.group('shortname') in ('tds', 'thedailyshow'): | ||||
|                 url = u'http://www.thedailyshow.com/full-episodes/' | ||||
|                 url = 'http://www.thedailyshow.com/full-episodes/' | ||||
|             else: | ||||
|                 url = u'http://www.colbertnation.com/full-episodes/' | ||||
|                 url = 'http://www.colbertnation.com/full-episodes/' | ||||
|             mobj = re.match(self._VALID_URL, url, re.VERBOSE) | ||||
|             assert mobj is not None | ||||
|  | ||||
| @@ -131,9 +127,9 @@ class ComedyCentralShowsIE(InfoExtractor): | ||||
|             url = htmlHandle.geturl() | ||||
|             mobj = re.match(self._VALID_URL, url, re.VERBOSE) | ||||
|             if mobj is None: | ||||
|                 raise ExtractorError(u'Invalid redirected URL: ' + url) | ||||
|                 raise ExtractorError('Invalid redirected URL: ' + url) | ||||
|             if mobj.group('episode') == '': | ||||
|                 raise ExtractorError(u'Redirected URL is still not specific: ' + url) | ||||
|                 raise ExtractorError('Redirected URL is still not specific: ' + url) | ||||
|             epTitle = mobj.group('episode') | ||||
|  | ||||
|         mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage) | ||||
| @@ -145,15 +141,15 @@ class ComedyCentralShowsIE(InfoExtractor): | ||||
|  | ||||
|             altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage) | ||||
|             if len(altMovieParams) == 0: | ||||
|                 raise ExtractorError(u'unable to find Flash URL in webpage ' + url) | ||||
|                 raise ExtractorError('unable to find Flash URL in webpage ' + url) | ||||
|             else: | ||||
|                 mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])] | ||||
|  | ||||
|         uri = mMovieParams[0][1] | ||||
|         indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri}) | ||||
|         idoc = self._download_xml(indexUrl, epTitle, | ||||
|                                           u'Downloading show index', | ||||
|                                           u'unable to download episode index') | ||||
|                                           'Downloading show index', | ||||
|                                           'unable to download episode index') | ||||
|  | ||||
|         results = [] | ||||
|  | ||||
| @@ -168,7 +164,7 @@ class ComedyCentralShowsIE(InfoExtractor): | ||||
|             configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' + | ||||
|                         compat_urllib_parse.urlencode({'uri': mediaId})) | ||||
|             cdoc = self._download_xml(configUrl, epTitle, | ||||
|                                                u'Downloading configuration for %s' % shortMediaId) | ||||
|                                                'Downloading configuration for %s' % shortMediaId) | ||||
|  | ||||
|             turls = [] | ||||
|             for rendition in cdoc.findall('.//rendition'): | ||||
| @@ -176,7 +172,7 @@ class ComedyCentralShowsIE(InfoExtractor): | ||||
|                 turls.append(finfo) | ||||
|  | ||||
|             if len(turls) == 0: | ||||
|                 self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found') | ||||
|                 self._downloader.report_error('unable to download ' + mediaId + ': No videos found') | ||||
|                 continue | ||||
|  | ||||
|             formats = [] | ||||
| @@ -190,7 +186,7 @@ class ComedyCentralShowsIE(InfoExtractor): | ||||
|                     'width': w, | ||||
|                 }) | ||||
|  | ||||
|             effTitle = showId + u'-' + epTitle + u' part ' + compat_str(partNum+1) | ||||
|             effTitle = showId + '-' + epTitle + ' part ' + compat_str(partNum+1) | ||||
|             results.append({ | ||||
|                 'id': shortMediaId, | ||||
|                 'formats': formats, | ||||
|   | ||||
| @@ -1,4 +1,6 @@ | ||||
| import base64 | ||||
| import hashlib | ||||
| import json | ||||
| import os | ||||
| import re | ||||
| import socket | ||||
| @@ -9,6 +11,7 @@ import xml.etree.ElementTree | ||||
| from ..utils import ( | ||||
|     compat_http_client, | ||||
|     compat_urllib_error, | ||||
|     compat_urllib_parse_urlparse, | ||||
|     compat_str, | ||||
|  | ||||
|     clean_html, | ||||
| @@ -37,10 +40,12 @@ class InfoExtractor(object): | ||||
|     id:             Video identifier. | ||||
|     title:          Video title, unescaped. | ||||
|  | ||||
|     Additionally, it must contain either a formats entry or url and ext: | ||||
|     Additionally, it must contain either a formats entry or a url one: | ||||
|  | ||||
|     formats:        A list of dictionaries for each format available, it must | ||||
|                     be ordered from worst to best quality. Potential fields: | ||||
|     formats:        A list of dictionaries for each format available, ordered | ||||
|                     from worst to best quality. | ||||
|  | ||||
|                     Potential fields: | ||||
|                     * url        Mandatory. The URL of the video file | ||||
|                     * ext        Will be calculated from url if missing | ||||
|                     * format     A human-readable description of the format | ||||
| @@ -48,32 +53,53 @@ class InfoExtractor(object): | ||||
|                                  Calculated from the format_id, width, height. | ||||
|                                  and format_note fields if missing. | ||||
|                     * format_id  A short description of the format | ||||
|                                  ("mp4_h264_opus" or "19") | ||||
|                                  ("mp4_h264_opus" or "19"). | ||||
|                                 Technically optional, but strongly recommended. | ||||
|                     * format_note Additional info about the format | ||||
|                                  ("3D" or "DASH video") | ||||
|                     * width      Width of the video, if known | ||||
|                     * height     Height of the video, if known | ||||
|                     * resolution Textual description of width and height | ||||
|                     * tbr        Average bitrate of audio and video in KBit/s | ||||
|                     * abr        Average audio bitrate in KBit/s | ||||
|                     * acodec     Name of the audio codec in use | ||||
|                     * asr        Audio sampling rate in Hertz | ||||
|                     * vbr        Average video bitrate in KBit/s | ||||
|                     * vcodec     Name of the video codec in use | ||||
|                     * container  Name of the container format | ||||
|                     * filesize   The number of bytes, if known in advance | ||||
|                     * player_url SWF Player URL (used for rtmpdump). | ||||
|                     * protocol   The protocol that will be used for the actual | ||||
|                                  download, lower-case. | ||||
|                                  "http", "https", "rtsp", "rtmp", "m3u8" or so. | ||||
|                     * preference Order number of this format. If this field is | ||||
|                                  present and not None, the formats get sorted | ||||
|                                  by this field. | ||||
|                                  -1 for default (order by other properties), | ||||
|                                  -2 or smaller for less than default. | ||||
|                     * quality    Order number of the video quality of this | ||||
|                                  format, irrespective of the file format. | ||||
|                                  -1 for default (order by other properties), | ||||
|                                  -2 or smaller for less than default. | ||||
|     url:            Final video URL. | ||||
|     ext:            Video filename extension. | ||||
|     format:         The video format, defaults to ext (used for --get-format) | ||||
|     player_url:     SWF Player URL (used for rtmpdump). | ||||
|     urlhandle:      [internal] The urlHandle to be used to download the file, | ||||
|                     like returned by urllib.request.urlopen | ||||
|  | ||||
|     The following fields are optional: | ||||
|  | ||||
|     display_id      An alternative identifier for the video, not necessarily | ||||
|                     unique, but available before title. Typically, id is | ||||
|                     something like "4234987", title "Dancing naked mole rats", | ||||
|                     and display_id "dancing-naked-mole-rats" | ||||
|     thumbnails:     A list of dictionaries (with the entries "resolution" and | ||||
|                     "url") for the varying thumbnails | ||||
|     thumbnail:      Full URL to a video thumbnail image. | ||||
|     description:    One-line video description. | ||||
|     uploader:       Full name of the video uploader. | ||||
|     timestamp:      UNIX timestamp of the moment the video became available. | ||||
|     upload_date:    Video upload date (YYYYMMDD). | ||||
|                     If not explicitly set, calculated from timestamp. | ||||
|     uploader_id:    Nickname or id of the video uploader. | ||||
|     location:       Physical location of the video. | ||||
|     subtitles:      The subtitle file contents as a dictionary in the format | ||||
| @@ -94,9 +120,6 @@ class InfoExtractor(object): | ||||
|     _real_extract() methods and define a _VALID_URL regexp. | ||||
|     Probably, they should also be added to the list of extractors. | ||||
|  | ||||
|     _real_extract() must return a *list* of information dictionaries as | ||||
|     described above. | ||||
|  | ||||
|     Finally, the _WORKING attribute should be set to False for broken IEs | ||||
|     in order to warn the users and skip the tests. | ||||
|     """ | ||||
| @@ -202,6 +225,8 @@ class InfoExtractor(object): | ||||
|                           webpage_bytes[:1024]) | ||||
|             if m: | ||||
|                 encoding = m.group(1).decode('ascii') | ||||
|             elif webpage_bytes.startswith(b'\xff\xfe'): | ||||
|                 encoding = 'utf-16' | ||||
|             else: | ||||
|                 encoding = 'utf-8' | ||||
|         if self._downloader.params.get('dump_intermediate_pages', False): | ||||
| @@ -217,6 +242,9 @@ class InfoExtractor(object): | ||||
|                 url = url_or_request.get_full_url() | ||||
|             except AttributeError: | ||||
|                 url = url_or_request | ||||
|             if len(url) > 200: | ||||
|                 h = u'___' + hashlib.md5(url.encode('utf-8')).hexdigest() | ||||
|                 url = url[:200 - len(h)] + h | ||||
|             raw_filename = ('%s_%s.dump' % (video_id, url)) | ||||
|             filename = sanitize_filename(raw_filename, restricted=True) | ||||
|             self.to_screen(u'Saving request to ' + filename) | ||||
| @@ -244,6 +272,23 @@ class InfoExtractor(object): | ||||
|             xml_string = transform_source(xml_string) | ||||
|         return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8')) | ||||
|  | ||||
|     def _download_json(self, url_or_request, video_id, | ||||
|                        note=u'Downloading JSON metadata', | ||||
|                        errnote=u'Unable to download JSON metadata', | ||||
|                        transform_source=None): | ||||
|         json_string = self._download_webpage(url_or_request, video_id, note, errnote) | ||||
|         if transform_source: | ||||
|             json_string = transform_source(json_string) | ||||
|         try: | ||||
|             return json.loads(json_string) | ||||
|         except ValueError as ve: | ||||
|             raise ExtractorError('Failed to download JSON', cause=ve) | ||||
|  | ||||
|     def report_warning(self, msg, video_id=None): | ||||
|         idstr = u'' if video_id is None else u'%s: ' % video_id | ||||
|         self._downloader.report_warning( | ||||
|             u'[%s] %s%s' % (self.IE_NAME, idstr, msg)) | ||||
|  | ||||
|     def to_screen(self, msg): | ||||
|         """Print msg to screen, prefixing it with '[ie_name]'""" | ||||
|         self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg)) | ||||
| @@ -360,8 +405,8 @@ class InfoExtractor(object): | ||||
|     # Helper functions for extracting OpenGraph info | ||||
|     @staticmethod | ||||
|     def _og_regexes(prop): | ||||
|         content_re = r'content=(?:"([^>]+?)"|\'(.+?)\')' | ||||
|         property_re = r'property=[\'"]og:%s[\'"]' % re.escape(prop) | ||||
|         content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')' | ||||
|         property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop) | ||||
|         template = r'<meta[^>]+?%s[^>]+?%s' | ||||
|         return [ | ||||
|             template % (property_re, content_re), | ||||
| @@ -390,14 +435,14 @@ class InfoExtractor(object): | ||||
|         if secure: regexes = self._og_regexes('video:secure_url') + regexes | ||||
|         return self._html_search_regex(regexes, html, name, **kargs) | ||||
|  | ||||
|     def _html_search_meta(self, name, html, display_name=None): | ||||
|     def _html_search_meta(self, name, html, display_name=None, fatal=False): | ||||
|         if display_name is None: | ||||
|             display_name = name | ||||
|         return self._html_search_regex( | ||||
|             r'''(?ix)<meta | ||||
|                     (?=[^>]+(?:itemprop|name|property)=["\']%s["\']) | ||||
|                     [^>]+content=["\']([^"\']+)["\']''' % re.escape(name), | ||||
|             html, display_name, fatal=False) | ||||
|             html, display_name, fatal=fatal) | ||||
|  | ||||
|     def _dc_search_uploader(self, html): | ||||
|         return self._html_search_meta('dc.creator', html, 'uploader') | ||||
| @@ -426,6 +471,65 @@ class InfoExtractor(object): | ||||
|         } | ||||
|         return RATING_TABLE.get(rating.lower(), None) | ||||
|  | ||||
|     def _twitter_search_player(self, html): | ||||
|         return self._html_search_meta('twitter:player', html, | ||||
|             'twitter card player') | ||||
|  | ||||
|     def _sort_formats(self, formats): | ||||
|         if not formats: | ||||
|             raise ExtractorError(u'No video formats found') | ||||
|  | ||||
|         def _formats_key(f): | ||||
|             # TODO remove the following workaround | ||||
|             from ..utils import determine_ext | ||||
|             if not f.get('ext') and 'url' in f: | ||||
|                 f['ext'] = determine_ext(f['url']) | ||||
|  | ||||
|             preference = f.get('preference') | ||||
|             if preference is None: | ||||
|                 proto = f.get('protocol') | ||||
|                 if proto is None: | ||||
|                     proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme | ||||
|  | ||||
|                 preference = 0 if proto in ['http', 'https'] else -0.1 | ||||
|                 if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported | ||||
|                     preference -= 0.5 | ||||
|  | ||||
|             if f.get('vcodec') == 'none':  # audio only | ||||
|                 if self._downloader.params.get('prefer_free_formats'): | ||||
|                     ORDER = [u'aac', u'mp3', u'm4a', u'webm', u'ogg', u'opus'] | ||||
|                 else: | ||||
|                     ORDER = [u'webm', u'opus', u'ogg', u'mp3', u'aac', u'm4a'] | ||||
|                 ext_preference = 0 | ||||
|                 try: | ||||
|                     audio_ext_preference = ORDER.index(f['ext']) | ||||
|                 except ValueError: | ||||
|                     audio_ext_preference = -1 | ||||
|             else: | ||||
|                 if self._downloader.params.get('prefer_free_formats'): | ||||
|                     ORDER = [u'flv', u'mp4', u'webm'] | ||||
|                 else: | ||||
|                     ORDER = [u'webm', u'flv', u'mp4'] | ||||
|                 try: | ||||
|                     ext_preference = ORDER.index(f['ext']) | ||||
|                 except ValueError: | ||||
|                     ext_preference = -1 | ||||
|                 audio_ext_preference = 0 | ||||
|  | ||||
|             return ( | ||||
|                 preference, | ||||
|                 f.get('quality') if f.get('quality') is not None else -1, | ||||
|                 f.get('height') if f.get('height') is not None else -1, | ||||
|                 f.get('width') if f.get('width') is not None else -1, | ||||
|                 ext_preference, | ||||
|                 f.get('tbr') if f.get('tbr') is not None else -1, | ||||
|                 f.get('vbr') if f.get('vbr') is not None else -1, | ||||
|                 f.get('abr') if f.get('abr') is not None else -1, | ||||
|                 audio_ext_preference, | ||||
|                 f.get('filesize') if f.get('filesize') is not None else -1, | ||||
|                 f.get('format_id'), | ||||
|             ) | ||||
|         formats.sort(key=_formats_key) | ||||
|  | ||||
|  | ||||
| class SearchInfoExtractor(InfoExtractor): | ||||
|   | ||||
| @@ -1,4 +1,5 @@ | ||||
| # coding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
| @@ -20,30 +21,31 @@ class CondeNastIE(InfoExtractor): | ||||
|  | ||||
|     # The keys are the supported sites and the values are the name to be shown | ||||
|     # to the user and in the extractor description. | ||||
|     _SITES = {'wired': u'WIRED', | ||||
|               'gq': u'GQ', | ||||
|               'vogue': u'Vogue', | ||||
|               'glamour': u'Glamour', | ||||
|               'wmagazine': u'W Magazine', | ||||
|               'vanityfair': u'Vanity Fair', | ||||
|               } | ||||
|     _SITES = { | ||||
|         'wired': 'WIRED', | ||||
|         'gq': 'GQ', | ||||
|         'vogue': 'Vogue', | ||||
|         'glamour': 'Glamour', | ||||
|         'wmagazine': 'W Magazine', | ||||
|         'vanityfair': 'Vanity Fair', | ||||
|     } | ||||
|  | ||||
|     _VALID_URL = r'http://(video|www).(?P<site>%s).com/(?P<type>watch|series|video)/(?P<id>.+)' % '|'.join(_SITES.keys()) | ||||
|     IE_DESC = u'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values())) | ||||
|     _VALID_URL = r'http://(video|www)\.(?P<site>%s)\.com/(?P<type>watch|series|video)/(?P<id>.+)' % '|'.join(_SITES.keys()) | ||||
|     IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values())) | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': u'http://video.wired.com/watch/3d-printed-speakers-lit-with-led', | ||||
|         u'file': u'5171b343c2b4c00dd0c1ccb3.mp4', | ||||
|         u'md5': u'1921f713ed48aabd715691f774c451f7', | ||||
|         u'info_dict': { | ||||
|             u'title': u'3D Printed Speakers Lit With LED', | ||||
|             u'description': u'Check out these beautiful 3D printed LED speakers.  You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.', | ||||
|         'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led', | ||||
|         'file': '5171b343c2b4c00dd0c1ccb3.mp4', | ||||
|         'md5': '1921f713ed48aabd715691f774c451f7', | ||||
|         'info_dict': { | ||||
|             'title': '3D Printed Speakers Lit With LED', | ||||
|             'description': 'Check out these beautiful 3D printed LED speakers.  You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.', | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     def _extract_series(self, url, webpage): | ||||
|         title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>', | ||||
|                                         webpage, u'series title', flags=re.DOTALL) | ||||
|                                         webpage, 'series title', flags=re.DOTALL) | ||||
|         url_object = compat_urllib_parse_urlparse(url) | ||||
|         base_url = '%s://%s' % (url_object.scheme, url_object.netloc) | ||||
|         m_paths = re.finditer(r'<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]', | ||||
| @@ -57,39 +59,41 @@ class CondeNastIE(InfoExtractor): | ||||
|         description = self._html_search_regex([r'<div class="cne-video-description">(.+?)</div>', | ||||
|                                                r'<div class="video-post-content">(.+?)</div>', | ||||
|                                                ], | ||||
|                                               webpage, u'description', | ||||
|                                               webpage, 'description', | ||||
|                                               fatal=False, flags=re.DOTALL) | ||||
|         params = self._search_regex(r'var params = {(.+?)}[;,]', webpage, | ||||
|                                     u'player params', flags=re.DOTALL) | ||||
|         video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, u'video id') | ||||
|         player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, u'player id') | ||||
|         target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, u'target') | ||||
|                                     'player params', flags=re.DOTALL) | ||||
|         video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id') | ||||
|         player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, 'player id') | ||||
|         target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, 'target') | ||||
|         data = compat_urllib_parse.urlencode({'videoId': video_id, | ||||
|                                               'playerId': player_id, | ||||
|                                               'target': target, | ||||
|                                               }) | ||||
|         base_info_url = self._search_regex(r'url = [\'"](.+?)[\'"][,;]', | ||||
|                                            webpage, u'base info url', | ||||
|                                            webpage, 'base info url', | ||||
|                                            default='http://player.cnevids.com/player/loader.js?') | ||||
|         info_url = base_info_url + data | ||||
|         info_page = self._download_webpage(info_url, video_id, | ||||
|                                            u'Downloading video info') | ||||
|         video_info = self._search_regex(r'var video = ({.+?});', info_page, u'video info') | ||||
|                                            'Downloading video info') | ||||
|         video_info = self._search_regex(r'var video = ({.+?});', info_page, 'video info') | ||||
|         video_info = json.loads(video_info) | ||||
|  | ||||
|         def _formats_sort_key(f): | ||||
|             type_ord = 1 if f['type'] == 'video/mp4' else 0 | ||||
|             quality_ord = 1 if f['quality'] == 'high' else 0 | ||||
|             return (quality_ord, type_ord) | ||||
|         best_format = sorted(video_info['sources'][0], key=_formats_sort_key)[-1] | ||||
|         formats = [{ | ||||
|             'format_id': '%s-%s' % (fdata['type'].split('/')[-1], fdata['quality']), | ||||
|             'url': fdata['src'], | ||||
|             'ext': fdata['type'].split('/')[-1], | ||||
|             'quality': 1 if fdata['quality'] == 'high' else 0, | ||||
|         } for fdata in video_info['sources'][0]] | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return {'id': video_id, | ||||
|                 'url': best_format['src'], | ||||
|                 'ext': best_format['type'].split('/')[-1], | ||||
|                 'title': video_info['title'], | ||||
|                 'thumbnail': video_info['poster_frame'], | ||||
|                 'description': description, | ||||
|                 } | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'formats': formats, | ||||
|             'title': video_info['title'], | ||||
|             'thumbnail': video_info['poster_frame'], | ||||
|             'description': description, | ||||
|         } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|   | ||||
| @@ -1,5 +1,11 @@ | ||||
| # encoding: utf-8 | ||||
| import re, base64, zlib | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
| import base64 | ||||
| import zlib | ||||
|  | ||||
| from hashlib import sha1 | ||||
| from math import pow, sqrt, floor | ||||
| from .common import InfoExtractor | ||||
| @@ -17,30 +23,32 @@ from ..aes import ( | ||||
|     inc, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class CrunchyrollIE(InfoExtractor): | ||||
|     _VALID_URL = r'(?:https?://)?(?:www\.)?(?P<url>crunchyroll\.com/[^/]*/[^/?&]*?(?P<video_id>[0-9]+))(?:[/?&]|$)' | ||||
|     _TESTS = [{ | ||||
|         u'url': u'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513', | ||||
|         u'file': u'645513.flv', | ||||
|         #u'md5': u'b1639fd6ddfaa43788c85f6d1dddd412', | ||||
|         u'info_dict': { | ||||
|             u'title': u'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!', | ||||
|             u'description': u'md5:2d17137920c64f2f49981a7797d275ef', | ||||
|             u'thumbnail': u'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg', | ||||
|             u'uploader': u'Yomiuri Telecasting Corporation (YTV)', | ||||
|             u'upload_date': u'20131013', | ||||
|     _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)' | ||||
|     _TEST = { | ||||
|         'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513', | ||||
|         #'md5': 'b1639fd6ddfaa43788c85f6d1dddd412', | ||||
|         'info_dict': { | ||||
|             'id': '645513', | ||||
|             'ext': 'flv', | ||||
|             'title': 'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!', | ||||
|             'description': 'md5:2d17137920c64f2f49981a7797d275ef', | ||||
|             'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg', | ||||
|             'uploader': 'Yomiuri Telecasting Corporation (YTV)', | ||||
|             'upload_date': '20131013', | ||||
|         }, | ||||
|         u'params': { | ||||
|         'params': { | ||||
|             # rtmp | ||||
|             u'skip_download': True, | ||||
|             'skip_download': True, | ||||
|         }, | ||||
|     }] | ||||
|     } | ||||
|  | ||||
|     _FORMAT_IDS = { | ||||
|         u'360': (u'60', u'106'), | ||||
|         u'480': (u'61', u'106'), | ||||
|         u'720': (u'62', u'106'), | ||||
|         u'1080': (u'80', u'108'), | ||||
|         '360': ('60', '106'), | ||||
|         '480': ('61', '106'), | ||||
|         '720': ('62', '106'), | ||||
|         '1080': ('80', '108'), | ||||
|     } | ||||
|  | ||||
|     def _decrypt_subtitles(self, data, iv, id): | ||||
| @@ -63,10 +71,10 @@ class CrunchyrollIE(InfoExtractor): | ||||
|             num3 = key ^ num1 | ||||
|             num4 = num3 ^ (num3 >> 3) ^ num2 | ||||
|             prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2))) | ||||
|             shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode(u'ascii')).digest()) | ||||
|             shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode('ascii')).digest()) | ||||
|             # Extend 160 Bit hash to 256 Bit | ||||
|             return shaHash + [0] * 12 | ||||
|          | ||||
|  | ||||
|         key = obfuscate_key(id) | ||||
|         class Counter: | ||||
|             __value = iv | ||||
| @@ -78,94 +86,103 @@ class CrunchyrollIE(InfoExtractor): | ||||
|         return zlib.decompress(decrypted_data) | ||||
|  | ||||
|     def _convert_subtitles_to_srt(self, subtitles): | ||||
|         i=1 | ||||
|         output = u'' | ||||
|         for start, end, text in re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles): | ||||
|             start = start.replace(u'.', u',') | ||||
|             end = end.replace(u'.', u',') | ||||
|         output = '' | ||||
|         for i, (start, end, text) in enumerate(re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles), 1): | ||||
|             start = start.replace('.', ',') | ||||
|             end = end.replace('.', ',') | ||||
|             text = clean_html(text) | ||||
|             text = text.replace(u'\\N', u'\n') | ||||
|             text = text.replace('\\N', '\n') | ||||
|             if not text: | ||||
|                 continue | ||||
|             output += u'%d\n%s --> %s\n%s\n\n' % (i, start, end, text) | ||||
|             i+=1 | ||||
|             output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text) | ||||
|         return output | ||||
|  | ||||
|     def _real_extract(self,url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         video_id = mobj.group('video_id') | ||||
|  | ||||
|         webpage_url = u'http://www.' + mobj.group('url') | ||||
|         video_id = mobj.group(u'video_id') | ||||
|         webpage = self._download_webpage(webpage_url, video_id) | ||||
|         note_m = self._html_search_regex(r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, u'trailer-notice', default=u'') | ||||
|         if mobj.group('prefix') == 'm': | ||||
|             mobile_webpage = self._download_webpage(url, video_id, 'Downloading mobile webpage') | ||||
|             webpage_url = self._search_regex(r'<link rel="canonical" href="([^"]+)" />', mobile_webpage, 'webpage_url') | ||||
|         else: | ||||
|             webpage_url = 'http://www.' + mobj.group('url') | ||||
|  | ||||
|         webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage') | ||||
|         note_m = self._html_search_regex(r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, 'trailer-notice', default='') | ||||
|         if note_m: | ||||
|             raise ExtractorError(note_m) | ||||
|  | ||||
|         video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, u'video_title', flags=re.DOTALL) | ||||
|         video_title = re.sub(r' {2,}', u' ', video_title) | ||||
|         video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, u'video_description', default=u'') | ||||
|         mobj = re.search(r'Page\.messaging_box_controller\.addItems\(\[(?P<msg>{.+?})\]\)', webpage) | ||||
|         if mobj: | ||||
|             msg = json.loads(mobj.group('msg')) | ||||
|             if msg.get('type') == 'error': | ||||
|                 raise ExtractorError('crunchyroll returned error: %s' % msg['message_body'], expected=True) | ||||
|  | ||||
|         video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL) | ||||
|         video_title = re.sub(r' {2,}', ' ', video_title) | ||||
|         video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='') | ||||
|         if not video_description: | ||||
|             video_description = None | ||||
|         video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, u'video_upload_date', fatal=False, flags=re.DOTALL) | ||||
|         video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, 'video_upload_date', fatal=False, flags=re.DOTALL) | ||||
|         if video_upload_date: | ||||
|             video_upload_date = unified_strdate(video_upload_date) | ||||
|         video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, u'video_uploader', fatal=False, flags=re.DOTALL) | ||||
|         video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, 'video_uploader', fatal=False, flags=re.DOTALL) | ||||
|  | ||||
|         playerdata_url = compat_urllib_parse.unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, u'playerdata_url')) | ||||
|         playerdata_url = compat_urllib_parse.unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url')) | ||||
|         playerdata_req = compat_urllib_request.Request(playerdata_url) | ||||
|         playerdata_req.data = compat_urllib_parse.urlencode({u'current_page': webpage_url}) | ||||
|         playerdata_req.add_header(u'Content-Type', u'application/x-www-form-urlencoded') | ||||
|         playerdata = self._download_webpage(playerdata_req, video_id, note=u'Downloading media info') | ||||
|          | ||||
|         stream_id = self._search_regex(r'<media_id>([^<]+)', playerdata, u'stream_id') | ||||
|         video_thumbnail = self._search_regex(r'<episode_image_url>([^<]+)', playerdata, u'thumbnail', fatal=False) | ||||
|         playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url}) | ||||
|         playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|         playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info') | ||||
|  | ||||
|         stream_id = self._search_regex(r'<media_id>([^<]+)', playerdata, 'stream_id') | ||||
|         video_thumbnail = self._search_regex(r'<episode_image_url>([^<]+)', playerdata, 'thumbnail', fatal=False) | ||||
|  | ||||
|         formats = [] | ||||
|         for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage): | ||||
|             stream_quality, stream_format = self._FORMAT_IDS[fmt] | ||||
|             video_format = fmt+u'p' | ||||
|             streamdata_req = compat_urllib_request.Request(u'http://www.crunchyroll.com/xml/') | ||||
|             video_format = fmt+'p' | ||||
|             streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/') | ||||
|             # urlencode doesn't work! | ||||
|             streamdata_req.data = u'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+u'&media%5Fid='+stream_id+u'&video%5Fformat='+stream_format | ||||
|             streamdata_req.add_header(u'Content-Type', u'application/x-www-form-urlencoded') | ||||
|             streamdata_req.add_header(u'Content-Length', str(len(streamdata_req.data))) | ||||
|             streamdata = self._download_webpage(streamdata_req, video_id, note=u'Downloading media info for '+video_format) | ||||
|             video_url = self._search_regex(r'<host>([^<]+)', streamdata, u'video_url') | ||||
|             video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, u'video_play_path') | ||||
|             streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format | ||||
|             streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|             streamdata_req.add_header('Content-Length', str(len(streamdata_req.data))) | ||||
|             streamdata = self._download_webpage(streamdata_req, video_id, note='Downloading media info for '+video_format) | ||||
|             video_url = self._search_regex(r'<host>([^<]+)', streamdata, 'video_url') | ||||
|             video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, 'video_play_path') | ||||
|             formats.append({ | ||||
|                 u'url': video_url, | ||||
|                 u'play_path':   video_play_path, | ||||
|                 u'ext': 'flv', | ||||
|                 u'format': video_format, | ||||
|                 u'format_id': video_format, | ||||
|                 'url': video_url, | ||||
|                 'play_path':   video_play_path, | ||||
|                 'ext': 'flv', | ||||
|                 'format': video_format, | ||||
|                 'format_id': video_format, | ||||
|             }) | ||||
|  | ||||
|         subtitles = {} | ||||
|         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage): | ||||
|             sub_page = self._download_webpage(u'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\ | ||||
|                                               video_id, note=u'Downloading subtitles for '+sub_name) | ||||
|             id = self._search_regex(r'id=\'([0-9]+)', sub_page, u'subtitle_id', fatal=False) | ||||
|             iv = self._search_regex(r'<iv>([^<]+)', sub_page, u'subtitle_iv', fatal=False) | ||||
|             data = self._search_regex(r'<data>([^<]+)', sub_page, u'subtitle_data', fatal=False) | ||||
|             sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\ | ||||
|                                               video_id, note='Downloading subtitles for '+sub_name) | ||||
|             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False) | ||||
|             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False) | ||||
|             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False) | ||||
|             if not id or not iv or not data: | ||||
|                 continue | ||||
|             id = int(id) | ||||
|             iv = base64.b64decode(iv) | ||||
|             data = base64.b64decode(data) | ||||
|  | ||||
|             subtitle = self._decrypt_subtitles(data, iv, id).decode(u'utf-8') | ||||
|             lang_code = self._search_regex(r'lang_code=\'([^\']+)', subtitle, u'subtitle_lang_code', fatal=False) | ||||
|             subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8') | ||||
|             lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False) | ||||
|             if not lang_code: | ||||
|                 continue | ||||
|             subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle) | ||||
|  | ||||
|         return { | ||||
|             u'id':          video_id, | ||||
|             u'title':       video_title, | ||||
|             u'description': video_description, | ||||
|             u'thumbnail':   video_thumbnail, | ||||
|             u'uploader':    video_uploader, | ||||
|             u'upload_date': video_upload_date, | ||||
|             u'subtitles':   subtitles, | ||||
|             u'formats':     formats, | ||||
|             'id':          video_id, | ||||
|             'title':       video_title, | ||||
|             'description': video_description, | ||||
|             'thumbnail':   video_thumbnail, | ||||
|             'uploader':    video_uploader, | ||||
|             'upload_date': video_upload_date, | ||||
|             'subtitles':   subtitles, | ||||
|             'formats':     formats, | ||||
|         } | ||||
|   | ||||
| @@ -1,51 +1,71 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     compat_urllib_parse, | ||||
|     unescapeHTML, | ||||
|     find_xpath_attr, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class CSpanIE(InfoExtractor): | ||||
|     _VALID_URL = r'http://www\.c-spanvideo\.org/program/(.*)' | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.c-spanvideo.org/program/HolderonV', | ||||
|         u'file': u'315139.flv', | ||||
|         u'md5': u'74a623266956f69e4df0068ab6c80fe4', | ||||
|         u'info_dict': { | ||||
|             u"title": u"Attorney General Eric Holder on Voting Rights Act Decision" | ||||
|     _VALID_URL = r'http://(?:www\.)?c-span\.org/video/\?(?P<id>[0-9a-f]+)' | ||||
|     IE_DESC = 'C-SPAN' | ||||
|     _TESTS = [{ | ||||
|         'url': 'http://www.c-span.org/video/?313572-1/HolderonV', | ||||
|         'md5': '8e44ce11f0f725527daccc453f553eb0', | ||||
|         'info_dict': { | ||||
|             'id': '315139', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Attorney General Eric Holder on Voting Rights Act Decision', | ||||
|             'description': 'Attorney General Eric Holder spoke to reporters following the Supreme Court decision in Shelby County v. Holder in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced until Congress established new guidelines for review.', | ||||
|         }, | ||||
|         u'skip': u'Requires rtmpdump' | ||||
|     } | ||||
|         'skip': 'Regularly fails on travis, for unknown reasons', | ||||
|     }, { | ||||
|         'url': 'http://www.c-span.org/video/?c4486943/cspan-international-health-care-models', | ||||
|         # For whatever reason, the served vide oalternates between | ||||
|         # two different ones | ||||
|         #'md5': 'dbb0f047376d457f2ab8b3929cbb2d0c', | ||||
|         'info_dict': { | ||||
|             'id': '340723', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'International Health Care Models', | ||||
|             'description': 'md5:7a985a2d595dba00af3d9c9f0783c967', | ||||
|         } | ||||
|     }] | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         prog_name = mobj.group(1) | ||||
|         webpage = self._download_webpage(url, prog_name) | ||||
|         video_id = self._search_regex(r'programid=(.*?)&', webpage, 'video id') | ||||
|         data = compat_urllib_parse.urlencode({'programid': video_id, | ||||
|                                               'dynamic':'1'}) | ||||
|         info_url = 'http://www.c-spanvideo.org/common/services/flashXml.php?' + data | ||||
|         video_info = self._download_webpage(info_url, video_id, u'Downloading video info') | ||||
|         page_id = mobj.group('id') | ||||
|         webpage = self._download_webpage(url, page_id) | ||||
|         video_id = self._search_regex(r'progid=\'?([0-9]+)\'?>', webpage, 'video id') | ||||
|  | ||||
|         self.report_extraction(video_id) | ||||
|         description = self._html_search_regex( | ||||
|             [ | ||||
|                 # The full description | ||||
|                 r'<div class=\'expandable\'>(.*?)<a href=\'#\'', | ||||
|                 # If the description is small enough the other div is not | ||||
|                 # present, otherwise this is a stripped version | ||||
|                 r'<p class=\'initial\'>(.*?)</p>' | ||||
|             ], | ||||
|             webpage, 'description', flags=re.DOTALL) | ||||
|  | ||||
|         title = self._html_search_regex(r'<string name="title">(.*?)</string>', | ||||
|                                         video_info, 'title') | ||||
|         description = self._html_search_regex(r'<meta (?:property="og:|name=")description" content="(.*?)"', | ||||
|                                               webpage, 'description', | ||||
|                                               flags=re.MULTILINE|re.DOTALL) | ||||
|         info_url = 'http://c-spanvideo.org/videoLibrary/assets/player/ajax-player.php?os=android&html5=program&id=' + video_id | ||||
|         data = self._download_json(info_url, video_id) | ||||
|  | ||||
|         url = self._search_regex(r'<string name="URL">(.*?)</string>', | ||||
|                                  video_info, 'video url') | ||||
|         url = url.replace('$(protocol)', 'rtmp').replace('$(port)', '443') | ||||
|         path = self._search_regex(r'<string name="path">(.*?)</string>', | ||||
|                             video_info, 'rtmp play path') | ||||
|         url = unescapeHTML(data['video']['files'][0]['path']['#text']) | ||||
|  | ||||
|         return {'id': video_id, | ||||
|                 'title': title, | ||||
|                 'ext': 'flv', | ||||
|                 'url': url, | ||||
|                 'play_path': path, | ||||
|                 'description': description, | ||||
|                 'thumbnail': self._og_search_thumbnail(webpage), | ||||
|                 } | ||||
|         doc = self._download_xml('http://www.c-span.org/common/services/flashXml.php?programid=' + video_id, | ||||
|             video_id) | ||||
|  | ||||
|         def find_string(s): | ||||
|             return find_xpath_attr(doc, './/string', 'name', s).text | ||||
|  | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'title': find_string('title'), | ||||
|             'url': url, | ||||
|             'description': description, | ||||
|             'thumbnail': find_string('poster'), | ||||
|         } | ||||
|   | ||||
| @@ -1,22 +1,25 @@ | ||||
| # encoding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .canalplus import CanalplusIE | ||||
|  | ||||
|  | ||||
| class D8IE(CanalplusIE): | ||||
|     _VALID_URL = r'https?://www\.d8\.tv/.*?/(?P<path>.*)' | ||||
|     _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/d8/%s' | ||||
|     IE_NAME = u'd8.tv' | ||||
|     IE_NAME = 'd8.tv' | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html', | ||||
|         u'file': u'966289.flv', | ||||
|         u'info_dict': { | ||||
|             u'title': u'Campagne intime - Documentaire exceptionnel', | ||||
|             u'description': u'md5:d2643b799fb190846ae09c61e59a859f', | ||||
|             u'upload_date': u'20131108', | ||||
|         'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html', | ||||
|         'file': '966289.flv', | ||||
|         'info_dict': { | ||||
|             'title': 'Campagne intime - Documentaire exceptionnel', | ||||
|             'description': 'md5:d2643b799fb190846ae09c61e59a859f', | ||||
|             'upload_date': '20131108', | ||||
|         }, | ||||
|         u'params': { | ||||
|         'params': { | ||||
|             # rtmp | ||||
|             u'skip_download': True, | ||||
|             'skip_download': True, | ||||
|         }, | ||||
|         'skip': 'videos get deleted after a while', | ||||
|     } | ||||
|   | ||||
| @@ -12,6 +12,7 @@ from ..utils import ( | ||||
|     get_element_by_id, | ||||
|     orderedSet, | ||||
|     str_to_int, | ||||
|     int_or_none, | ||||
|  | ||||
|     ExtractorError, | ||||
| ) | ||||
| @@ -124,7 +125,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor): | ||||
|             if video_url is not None: | ||||
|                 m_size = re.search(r'H264-(\d+)x(\d+)', video_url) | ||||
|                 if m_size is not None: | ||||
|                     width, height = m_size.group(1), m_size.group(2) | ||||
|                     width, height = map(int_or_none, (m_size.group(1), m_size.group(2))) | ||||
|                 else: | ||||
|                     width, height = None, None | ||||
|                 formats.append({ | ||||
|   | ||||
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
|  | ||||
| @@ -5,15 +7,14 @@ from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
| class DefenseGouvFrIE(InfoExtractor): | ||||
|     _IE_NAME = 'defense.gouv.fr' | ||||
|     IE_NAME = 'defense.gouv.fr' | ||||
|     _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/' | ||||
|         r'ligthboxvideo/base-de-medias/webtv/(.*)') | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': (u'http://www.defense.gouv.fr/layout/set/ligthboxvideo/' | ||||
|         u'base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1'), | ||||
|         u'file': u'11213.mp4', | ||||
|         u'md5': u'75bba6124da7e63d2d60b5244ec9430c', | ||||
|         'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1', | ||||
|         'file': '11213.mp4', | ||||
|         'md5': '75bba6124da7e63d2d60b5244ec9430c', | ||||
|         "info_dict": { | ||||
|             "title": "attaque-chimique-syrienne-du-21-aout-2013-1" | ||||
|         } | ||||
|   | ||||
| @@ -1,60 +0,0 @@ | ||||
| import re | ||||
| import os | ||||
| import socket | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     compat_http_client, | ||||
|     compat_str, | ||||
|     compat_urllib_error, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_request, | ||||
|  | ||||
|     ExtractorError, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class DepositFilesIE(InfoExtractor): | ||||
|     """Information extractor for depositfiles.com""" | ||||
|  | ||||
|     _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)' | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         file_id = url.split('/')[-1] | ||||
|         # Rebuild url in english locale | ||||
|         url = 'http://depositfiles.com/en/files/' + file_id | ||||
|  | ||||
|         # Retrieve file webpage with 'Free download' button pressed | ||||
|         free_download_indication = {'gateway_result' : '1'} | ||||
|         request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(free_download_indication)) | ||||
|         try: | ||||
|             self.report_download_webpage(file_id) | ||||
|             webpage = compat_urllib_request.urlopen(request).read() | ||||
|         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: | ||||
|             raise ExtractorError(u'Unable to retrieve file webpage: %s' % compat_str(err)) | ||||
|  | ||||
|         # Search for the real file URL | ||||
|         mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage) | ||||
|         if (mobj is None) or (mobj.group(1) is None): | ||||
|             # Try to figure out reason of the error. | ||||
|             mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL) | ||||
|             if (mobj is not None) and (mobj.group(1) is not None): | ||||
|                 restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip() | ||||
|                 raise ExtractorError(u'%s' % restriction_message) | ||||
|             else: | ||||
|                 raise ExtractorError(u'Unable to extract download URL from: %s' % url) | ||||
|  | ||||
|         file_url = mobj.group(1) | ||||
|         file_extension = os.path.splitext(file_url)[1][1:] | ||||
|  | ||||
|         # Search for file title | ||||
|         file_title = self._search_regex(r'<b title="(.*?)">', webpage, u'title') | ||||
|  | ||||
|         return [{ | ||||
|             'id':       file_id.decode('utf-8'), | ||||
|             'url':      file_url.decode('utf-8'), | ||||
|             'uploader': None, | ||||
|             'upload_date':  None, | ||||
|             'title':    file_title, | ||||
|             'ext':      file_extension.decode('utf-8'), | ||||
|         }] | ||||
							
								
								
									
										46
									
								
								youtube_dl/extractor/discovery.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										46
									
								
								youtube_dl/extractor/discovery.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,46 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
| class DiscoveryIE(InfoExtractor): | ||||
|     _VALID_URL = r'http://dsc\.discovery\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9\-]*)(.htm)?' | ||||
|     _TEST = { | ||||
|         'url': 'http://dsc.discovery.com/tv-shows/mythbusters/videos/mission-impossible-outtakes.htm', | ||||
|         'file': '614784.mp4', | ||||
|         'md5': 'e12614f9ee303a6ccef415cb0793eba2', | ||||
|         'info_dict': { | ||||
|             'title': 'MythBusters: Mission Impossible Outtakes', | ||||
|             'description': ('Watch Jamie Hyneman and Adam Savage practice being' | ||||
|                 ' each other -- to the point of confusing Jamie\'s dog -- and ' | ||||
|                 'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s' | ||||
|                 ' back.'), | ||||
|             'duration': 156, | ||||
|         }, | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         video_id = mobj.group('id') | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|  | ||||
|         video_list_json = self._search_regex(r'var videoListJSON = ({.*?});', | ||||
|             webpage, 'video list', flags=re.DOTALL) | ||||
|         video_list = json.loads(video_list_json) | ||||
|         info = video_list['clips'][0] | ||||
|         formats = [] | ||||
|         for f in info['mp4']: | ||||
|             formats.append( | ||||
|                 {'url': f['src'], r'ext': r'mp4', 'tbr': int(f['bitrate'][:-1])}) | ||||
|  | ||||
|         return { | ||||
|             'id': info['contentId'], | ||||
|             'title': video_list['name'], | ||||
|             'formats': formats, | ||||
|             'description': info['videoCaption'], | ||||
|             'thumbnail': info.get('videoStillURL') or info.get('thumbnailURL'), | ||||
|             'duration': info['duration'], | ||||
|         } | ||||
| @@ -1,41 +1,42 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
| import time | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
| class DotsubIE(InfoExtractor): | ||||
|     _VALID_URL = r'(?:http://)?(?:www\.)?dotsub\.com/view/([^/]+)' | ||||
|     _VALID_URL = r'http://(?:www\.)?dotsub\.com/view/(?P<id>[^/]+)' | ||||
|     _TEST = { | ||||
|         u'url': u'http://dotsub.com/view/aed3b8b2-1889-4df5-ae63-ad85f5572f27', | ||||
|         u'file': u'aed3b8b2-1889-4df5-ae63-ad85f5572f27.flv', | ||||
|         u'md5': u'0914d4d69605090f623b7ac329fea66e', | ||||
|         u'info_dict': { | ||||
|             u"title": u"Pyramids of Waste (2010), AKA The Lightbulb Conspiracy - Planned obsolescence documentary", | ||||
|             u"uploader": u"4v4l0n42", | ||||
|             u'description': u'Pyramids of Waste (2010) also known as "The lightbulb conspiracy" is a documentary about how our economic system based on consumerism  and planned obsolescence is breaking our planet down.\r\n\r\nSolutions to this can be found at:\r\nhttp://robotswillstealyourjob.com\r\nhttp://www.federicopistono.org\r\n\r\nhttp://opensourceecology.org\r\nhttp://thezeitgeistmovement.com', | ||||
|             u'thumbnail': u'http://dotsub.com/media/aed3b8b2-1889-4df5-ae63-ad85f5572f27/p', | ||||
|             u'upload_date': u'20101213', | ||||
|         'url': 'http://dotsub.com/view/aed3b8b2-1889-4df5-ae63-ad85f5572f27', | ||||
|         'md5': '0914d4d69605090f623b7ac329fea66e', | ||||
|         'info_dict': { | ||||
|             'id': 'aed3b8b2-1889-4df5-ae63-ad85f5572f27', | ||||
|             'ext': 'flv', | ||||
|             'title': 'Pyramids of Waste (2010), AKA The Lightbulb Conspiracy - Planned obsolescence documentary', | ||||
|             'uploader': '4v4l0n42', | ||||
|             'description': 'Pyramids of Waste (2010) also known as "The lightbulb conspiracy" is a documentary about how our economic system based on consumerism  and planned obsolescence is breaking our planet down.\r\n\r\nSolutions to this can be found at:\r\nhttp://robotswillstealyourjob.com\r\nhttp://www.federicopistono.org\r\n\r\nhttp://opensourceecology.org\r\nhttp://thezeitgeistmovement.com', | ||||
|             'thumbnail': 'http://dotsub.com/media/aed3b8b2-1889-4df5-ae63-ad85f5572f27/p', | ||||
|             'upload_date': '20101213', | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         video_id = mobj.group(1) | ||||
|         info_url = "https://dotsub.com/api/media/%s/metadata" %(video_id) | ||||
|         webpage = self._download_webpage(info_url, video_id) | ||||
|         info = json.loads(webpage) | ||||
|         video_id = mobj.group('id') | ||||
|         info_url = "https://dotsub.com/api/media/%s/metadata" % video_id | ||||
|         info = self._download_json(info_url, video_id) | ||||
|         date = time.gmtime(info['dateCreated']/1000) # The timestamp is in miliseconds | ||||
|  | ||||
|         return [{ | ||||
|             'id':          video_id, | ||||
|             'url':         info['mediaURI'], | ||||
|             'ext':         'flv', | ||||
|             'title':       info['title'], | ||||
|             'thumbnail':   info['screenshotURI'], | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'url': info['mediaURI'], | ||||
|             'ext': 'flv', | ||||
|             'title': info['title'], | ||||
|             'thumbnail': info['screenshotURI'], | ||||
|             'description': info['description'], | ||||
|             'uploader':    info['user'], | ||||
|             'view_count':  info['numberOfViews'], | ||||
|             'upload_date': u'%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday), | ||||
|         }] | ||||
|             'uploader': info['user'], | ||||
|             'view_count': info['numberOfViews'], | ||||
|             'upload_date': '%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday), | ||||
|         } | ||||
|   | ||||
| @@ -4,18 +4,17 @@ import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     determine_ext, | ||||
|     unified_strdate, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class DreiSatIE(InfoExtractor): | ||||
|     IE_NAME = '3sat' | ||||
|     _VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/index\.php\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$' | ||||
|     _VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/(?:index\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$' | ||||
|     _TEST = { | ||||
|         u"url": u"http://www.3sat.de/mediathek/index.php?obj=36983", | ||||
|         u'file': u'36983.webm', | ||||
|         u'md5': u'57c97d0469d71cf874f6815aa2b7c944', | ||||
|         u'file': u'36983.mp4', | ||||
|         u'md5': u'9dcfe344732808dbfcc901537973c922', | ||||
|         u'info_dict': { | ||||
|             u"title": u"Kaffeeland Schweiz", | ||||
|             u"description": u"Über 80 Kaffeeröstereien liefern in der Schweiz das Getränk, in das das Land so vernarrt ist: Mehr als 1000 Tassen trinkt ein Schweizer pro Jahr. SCHWEIZWEIT nimmt die Kaffeekultur unter die...",  | ||||
| @@ -52,18 +51,12 @@ class DreiSatIE(InfoExtractor): | ||||
|             'width': int(fe.find('./width').text), | ||||
|             'height': int(fe.find('./height').text), | ||||
|             'url': fe.find('./url').text, | ||||
|             'ext': determine_ext(fe.find('./url').text), | ||||
|             'filesize': int(fe.find('./filesize').text), | ||||
|             'video_bitrate': int(fe.find('./videoBitrate').text), | ||||
|             '3sat_qualityname': fe.find('./quality').text, | ||||
|         } for fe in format_els | ||||
|             if not fe.find('./url').text.startswith('http://www.metafilegenerator.de/')] | ||||
|  | ||||
|         def _sortkey(format): | ||||
|             qidx = ['low', 'med', 'high', 'veryhigh'].index(format['3sat_qualityname']) | ||||
|             prefer_http = 1 if 'rtmp' in format['url'] else 0 | ||||
|             return (qidx, prefer_http, format['video_bitrate']) | ||||
|         formats.sort(key=_sortkey) | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return { | ||||
|             '_type': 'video', | ||||
|   | ||||
							
								
								
									
										32
									
								
								youtube_dl/extractor/dropbox.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										32
									
								
								youtube_dl/extractor/dropbox.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,32 @@ | ||||
| # coding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import os.path | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
class DropboxIE(InfoExtractor):
    """Extractor for videos shared through Dropbox /s/ links.

    No page download is needed: the video id and title are both encoded
    in the share URL itself, and appending ``?dl=1`` yields a direct
    download link.
    """
    _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/s/(?P<id>[a-zA-Z0-9]{15})/(?P<title>[^?#]*)'
    _TEST = {
        'url': 'https://www.dropbox.com/s/0qr9sai2veej4f8/THE_DOCTOR_GAMES.mp4',
        'md5': '8ae17c51172fb7f93bdd6a214cc8c896',
        'info_dict': {
            'id': '0qr9sai2veej4f8',
            'ext': 'mp4',
            'title': 'THE_DOCTOR_GAMES'
        }
    }

    def _real_extract(self, url):
        match = re.match(self._VALID_URL, url)
        # Drop the file extension from the URL path to get a clean title.
        title, _ = os.path.splitext(match.group('title'))
        return {
            'id': match.group('id'),
            'title': title,
            # ?dl=1 turns the Dropbox share page into a direct download.
            'url': url + '?dl=1',
        }
							
								
								
									
										58
									
								
								youtube_dl/extractor/elpais.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										58
									
								
								youtube_dl/extractor/elpais.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,58 @@ | ||||
| # coding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import unified_strdate | ||||
|  | ||||
|  | ||||
class ElPaisIE(InfoExtractor):
    """Extractor for video pages on elpais.com and its blogs."""
    _VALID_URL = r'https?://(?:[^.]+\.)?elpais\.com/.*/(?P<id>[^/#?]+)\.html(?:$|[?#])'
    IE_DESC = 'El País'

    _TEST = {
        'url': 'http://blogs.elpais.com/la-voz-de-inaki/2014/02/tiempo-nuevo-recetas-viejas.html',
        'md5': '98406f301f19562170ec071b83433d55',
        'info_dict': {
            'id': 'tiempo-nuevo-recetas-viejas',
            'ext': 'mp4',
            'title': 'Tiempo nuevo, recetas viejas',
            'description': 'De lunes a viernes, a partir de las ocho de la mañana, Iñaki Gabilondo nos cuenta su visión de la actualidad nacional e internacional.',
            'upload_date': '20140206',
        }
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(url, video_id)

        # Media URLs are assembled in page JavaScript as url_cache + suffix;
        # replicate that concatenation here.
        base = self._html_search_regex(
            r'var url_cache = "([^"]+)";', webpage, 'URL prefix')
        media_suffix = self._search_regex(
            r"URLMediaFile = url_cache \+ '([^']+)'", webpage, 'video URL')
        still_suffix = self._search_regex(
            r"URLMediaStill = url_cache \+ '([^']+)'", webpage, 'thumbnail URL',
            fatal=False)
        thumbnail = base + still_suffix if still_suffix is not None else None

        title = self._html_search_regex(
            '<h2 class="entry-header entry-title.*?>(.*?)</h2>',
            webpage, 'title')

        # The upload date sits in the title attribute of the date header.
        date_str = self._search_regex(
            r'<p class="date-header date-int updated"\s+title="([^"]+)">',
            webpage, 'upload date', fatal=False)
        upload_date = unified_strdate(date_str) if date_str is not None else None

        return {
            'id': video_id,
            'url': base + media_suffix,
            'title': title,
            'description': self._og_search_description(webpage),
            'thumbnail': thumbnail,
            'upload_date': upload_date,
        }
| @@ -1,9 +1,9 @@ | ||||
| import json | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     compat_str, | ||||
|     compat_urllib_parse, | ||||
|  | ||||
|     ExtractorError, | ||||
| @@ -11,70 +11,68 @@ from ..utils import ( | ||||
|  | ||||
|  | ||||
| class EscapistIE(InfoExtractor): | ||||
|     _VALID_URL = r'^https?://?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$' | ||||
|     _VALID_URL = r'^https?://?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<id>[0-9]+)-' | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate', | ||||
|         u'file': u'6618-Breaking-Down-Baldurs-Gate.mp4', | ||||
|         u'md5': u'ab3a706c681efca53f0a35f1415cf0d1', | ||||
|         u'info_dict': { | ||||
|             u"description": u"Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.",  | ||||
|             u"uploader": u"the-escapist-presents",  | ||||
|             u"title": u"Breaking Down Baldur's Gate" | ||||
|         'url': 'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate', | ||||
|         'md5': 'ab3a706c681efca53f0a35f1415cf0d1', | ||||
|         'info_dict': { | ||||
|             'id': '6618', | ||||
|             'ext': 'mp4', | ||||
|             'description': "Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.", | ||||
|             'uploader': 'the-escapist-presents', | ||||
|             'title': "Breaking Down Baldur's Gate", | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         showName = mobj.group('showname') | ||||
|         videoId = mobj.group('episode') | ||||
|         video_id = mobj.group('id') | ||||
|  | ||||
|         self.report_extraction(videoId) | ||||
|         webpage = self._download_webpage(url, videoId) | ||||
|         self.report_extraction(video_id) | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|  | ||||
|         videoDesc = self._html_search_regex( | ||||
|             r'<meta name="description" content="([^"]*)"', | ||||
|             webpage, u'description', fatal=False) | ||||
|             webpage, 'description', fatal=False) | ||||
|  | ||||
|         playerUrl = self._og_search_video_url(webpage, name=u'player URL') | ||||
|  | ||||
|         title = self._html_search_regex( | ||||
|             r'<meta name="title" content="([^"]*)"', | ||||
|             webpage, u'title').split(' : ')[-1] | ||||
|             webpage, 'title').split(' : ')[-1] | ||||
|  | ||||
|         configUrl = self._search_regex('config=(.*)$', playerUrl, u'config URL') | ||||
|         configUrl = self._search_regex('config=(.*)$', playerUrl, 'config URL') | ||||
|         configUrl = compat_urllib_parse.unquote(configUrl) | ||||
|  | ||||
|         formats = [] | ||||
|  | ||||
|         def _add_format(name, cfgurl): | ||||
|             configJSON = self._download_webpage( | ||||
|                 cfgurl, videoId, | ||||
|                 u'Downloading ' + name + ' configuration', | ||||
|                 u'Unable to download ' + name + ' configuration') | ||||
|         def _add_format(name, cfgurl, quality): | ||||
|             config = self._download_json( | ||||
|                 cfgurl, video_id, | ||||
|                 'Downloading ' + name + ' configuration', | ||||
|                 'Unable to download ' + name + ' configuration', | ||||
|                 transform_source=lambda s: s.replace("'", '"')) | ||||
|  | ||||
|             # Technically, it's JavaScript, not JSON | ||||
|             configJSON = configJSON.replace("'", '"') | ||||
|  | ||||
|             try: | ||||
|                 config = json.loads(configJSON) | ||||
|             except (ValueError,) as err: | ||||
|                 raise ExtractorError(u'Invalid JSON in configuration file: ' + compat_str(err)) | ||||
|             playlist = config['playlist'] | ||||
|             formats.append({ | ||||
|                 'url': playlist[1]['url'], | ||||
|                 'format_id': name, | ||||
|                 'quality': quality, | ||||
|             }) | ||||
|  | ||||
|         _add_format(u'normal', configUrl) | ||||
|         _add_format('normal', configUrl, quality=0) | ||||
|         hq_url = (configUrl + | ||||
|                   ('&hq=1' if '?' in configUrl else configUrl + '?hq=1')) | ||||
|         try: | ||||
|             _add_format(u'hq', hq_url) | ||||
|             _add_format('hq', hq_url, quality=1) | ||||
|         except ExtractorError: | ||||
|             pass  # That's fine, we'll just use normal quality | ||||
|  | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return { | ||||
|             'id': videoId, | ||||
|             'id': video_id, | ||||
|             'formats': formats, | ||||
|             'uploader': showName, | ||||
|             'title': title, | ||||
|   | ||||
							
								
								
									
										69
									
								
								youtube_dl/extractor/everyonesmixtape.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										69
									
								
								youtube_dl/extractor/everyonesmixtape.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,69 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     compat_urllib_request, | ||||
|     ExtractorError, | ||||
| ) | ||||
|  | ||||
|  | ||||
class EveryonesMixtapeIE(InfoExtractor):
    """Extractor for everyonesmixtape.com playlists.

    Returns the whole mixtape as a playlist of external (YouTube etc.)
    URLs, or a single entry when the URL ends in a /N track number.
    """
    _VALID_URL = r'https?://(?:www\.)?everyonesmixtape\.com/#/mix/(?P<id>[0-9a-zA-Z]+)(?:/(?P<songnr>[0-9]))?$'

    _TEST = {
        'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
        'file': '5bfseWNmlds.mp4',
        "info_dict": {
            "title": "Passion Pit - \"Sleepyhead\" (Official Music Video)",
            "uploader": "FKR.TV",
            "uploader_id": "frenchkissrecords",
            "description": "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
            "upload_date": "20081015"
        },
        'params': {
            'skip_download': True,  # This is simply YouTube
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')

        def _api_json(api_url, note):
            # The API answers with JSON only when queried via XHR.
            req = compat_urllib_request.Request(api_url)
            req.add_header('X-Requested-With', 'XMLHttpRequest')
            return self._download_json(req, playlist_id, note=note)

        mixes = _api_json(
            'http://everyonesmixtape.com/mixtape.php?a=getMixes&u=-1&linked=%s&explore=' % playlist_id,
            'Downloading playlist metadata')
        # Map the short code from the URL to the internal numeric mix id.
        playlist_no = next((mix['id'] for mix in mixes
                            if mix['code'] == playlist_id), None)
        if playlist_no is None:
            raise ExtractorError('Playlist id not found')

        playlist = _api_json(
            'http://everyonesmixtape.com/mixtape.php?a=getMix&id=%s&userId=null&code=' % playlist_no,
            'Downloading playlist info')

        entries = []
        for track in playlist['tracks']:
            entries.append({
                '_type': 'url',
                'url': track['url'],
                'title': track['title'],
            })

        if mobj.group('songnr'):
            # Track numbers in the URL are 1-based.
            return entries[int(mobj.group('songnr')) - 1]

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': playlist['mixData']['name'],
            'entries': entries,
        }
| @@ -1,56 +1,58 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
| class ExfmIE(InfoExtractor): | ||||
|     IE_NAME = u'exfm' | ||||
|     IE_DESC = u'ex.fm' | ||||
|     _VALID_URL = r'(?:http://)?(?:www\.)?ex\.fm/song/([^/]+)' | ||||
|     _SOUNDCLOUD_URL = r'(?:http://)?(?:www\.)?api\.soundcloud\.com/tracks/([^/]+)/stream' | ||||
|     IE_NAME = 'exfm' | ||||
|     IE_DESC = 'ex.fm' | ||||
|     _VALID_URL = r'http://(?:www\.)?ex\.fm/song/(?P<id>[^/]+)' | ||||
|     _SOUNDCLOUD_URL = r'http://(?:www\.)?api\.soundcloud\.com/tracks/([^/]+)/stream' | ||||
|     _TESTS = [ | ||||
|         { | ||||
|             u'url': u'http://ex.fm/song/eh359', | ||||
|             u'file': u'44216187.mp3', | ||||
|             u'md5': u'e45513df5631e6d760970b14cc0c11e7', | ||||
|             u'info_dict': { | ||||
|                 u"title": u"Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive", | ||||
|                 u"uploader": u"deadjournalist", | ||||
|                 u'upload_date': u'20120424', | ||||
|                 u'description': u'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive', | ||||
|             'url': 'http://ex.fm/song/eh359', | ||||
|             'md5': 'e45513df5631e6d760970b14cc0c11e7', | ||||
|             'info_dict': { | ||||
|                 'id': '44216187', | ||||
|                 'ext': 'mp3', | ||||
|                 'title': 'Test House "Love Is Not Enough" (Extended Mix) DeadJournalist Exclusive', | ||||
|                 'uploader': 'deadjournalist', | ||||
|                 'upload_date': '20120424', | ||||
|                 'description': 'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive', | ||||
|             }, | ||||
|             u'note': u'Soundcloud song', | ||||
|             u'skip': u'The site is down too often', | ||||
|             'note': 'Soundcloud song', | ||||
|             'skip': 'The site is down too often', | ||||
|         }, | ||||
|         { | ||||
|             u'url': u'http://ex.fm/song/wddt8', | ||||
|             u'file': u'wddt8.mp3', | ||||
|             u'md5': u'966bd70741ac5b8570d8e45bfaed3643', | ||||
|             u'info_dict': { | ||||
|                 u'title': u'Safe and Sound', | ||||
|                 u'uploader': u'Capital Cities', | ||||
|             'url': 'http://ex.fm/song/wddt8', | ||||
|             'md5': '966bd70741ac5b8570d8e45bfaed3643', | ||||
|             'info_dict': { | ||||
|                 'id': 'wddt8', | ||||
|                 'ext': 'mp3', | ||||
|                 'title': 'Safe and Sound', | ||||
|                 'uploader': 'Capital Cities', | ||||
|             }, | ||||
|             u'skip': u'The site is down too often', | ||||
|             'skip': 'The site is down too often', | ||||
|         }, | ||||
|     ] | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         song_id = mobj.group(1) | ||||
|         info_url = "http://ex.fm/api/v3/song/%s" %(song_id) | ||||
|         webpage = self._download_webpage(info_url, song_id) | ||||
|         info = json.loads(webpage) | ||||
|         song_url = info['song']['url'] | ||||
|         song_id = mobj.group('id') | ||||
|         info_url = "http://ex.fm/api/v3/song/%s" % song_id | ||||
|         info = self._download_json(info_url, song_id)['song'] | ||||
|         song_url = info['url'] | ||||
|         if re.match(self._SOUNDCLOUD_URL, song_url) is not None: | ||||
|             self.to_screen('Soundcloud song detected') | ||||
|             return self.url_result(song_url.replace('/stream',''), 'Soundcloud') | ||||
|         return [{ | ||||
|             'id':          song_id, | ||||
|             'url':         song_url, | ||||
|             'ext':         'mp3', | ||||
|             'title':       info['song']['title'], | ||||
|             'thumbnail':   info['song']['image']['large'], | ||||
|             'uploader':    info['song']['artist'], | ||||
|             'view_count':  info['song']['loved_count'], | ||||
|         }] | ||||
|             return self.url_result(song_url.replace('/stream', ''), 'Soundcloud') | ||||
|         return { | ||||
|             'id': song_id, | ||||
|             'url': song_url, | ||||
|             'ext': 'mp3', | ||||
|             'title': info['title'], | ||||
|             'thumbnail': info['image']['large'], | ||||
|             'uploader': info['artist'], | ||||
|             'view_count': info['loved_count'], | ||||
|         } | ||||
|   | ||||
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import json | ||||
| import re | ||||
| import socket | ||||
| @@ -9,33 +11,34 @@ from ..utils import ( | ||||
|     compat_urllib_error, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_request, | ||||
|     urlencode_postdata, | ||||
|  | ||||
|     ExtractorError, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class FacebookIE(InfoExtractor): | ||||
|     """Information Extractor for Facebook""" | ||||
|  | ||||
|     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:[^#?]*#!/)?(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)' | ||||
|     _VALID_URL = r'''(?x) | ||||
|         https?://(?:\w+\.)?facebook\.com/ | ||||
|         (?:[^#?]*\#!/)? | ||||
|         (?:video/video\.php|photo\.php|video/embed)\?(?:.*?) | ||||
|         (?:v|video_id)=(?P<id>[0-9]+) | ||||
|         (?:.*)''' | ||||
|     _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1' | ||||
|     _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1' | ||||
|     _NETRC_MACHINE = 'facebook' | ||||
|     IE_NAME = u'facebook' | ||||
|     IE_NAME = 'facebook' | ||||
|     _TEST = { | ||||
|         u'url': u'https://www.facebook.com/photo.php?v=120708114770723', | ||||
|         u'file': u'120708114770723.mp4', | ||||
|         u'md5': u'48975a41ccc4b7a581abd68651c1a5a8', | ||||
|         u'info_dict': { | ||||
|             u"duration": 279, | ||||
|             u"title": u"PEOPLE ARE AWESOME 2013" | ||||
|         'url': 'https://www.facebook.com/photo.php?v=120708114770723', | ||||
|         'md5': '48975a41ccc4b7a581abd68651c1a5a8', | ||||
|         'info_dict': { | ||||
|             'id': '120708114770723', | ||||
|             'ext': 'mp4', | ||||
|             'duration': 279, | ||||
|             'title': 'PEOPLE ARE AWESOME 2013', | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     def report_login(self): | ||||
|         """Report attempt to log in.""" | ||||
|         self.to_screen(u'Logging in') | ||||
|  | ||||
|     def _login(self): | ||||
|         (useremail, password) = self._get_login_info() | ||||
|         if useremail is None: | ||||
| @@ -43,11 +46,13 @@ class FacebookIE(InfoExtractor): | ||||
|  | ||||
|         login_page_req = compat_urllib_request.Request(self._LOGIN_URL) | ||||
|         login_page_req.add_header('Cookie', 'locale=en_US') | ||||
|         self.report_login() | ||||
|         login_page = self._download_webpage(login_page_req, None, note=False, | ||||
|             errnote=u'Unable to download login page') | ||||
|         lsd = self._search_regex(r'"lsd":"(\w*?)"', login_page, u'lsd') | ||||
|         lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, u'lgnrnd') | ||||
|         login_page = self._download_webpage(login_page_req, None, | ||||
|             note='Downloading login page', | ||||
|             errnote='Unable to download login page') | ||||
|         lsd = self._search_regex( | ||||
|             r'<input type="hidden" name="lsd" value="([^"]*)"', | ||||
|             login_page, 'lsd') | ||||
|         lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd') | ||||
|  | ||||
|         login_form = { | ||||
|             'email': useremail, | ||||
| @@ -60,27 +65,29 @@ class FacebookIE(InfoExtractor): | ||||
|             'timezone': '-60', | ||||
|             'trynum': '1', | ||||
|             } | ||||
|         request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) | ||||
|         request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form)) | ||||
|         request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|         try: | ||||
|             login_results = compat_urllib_request.urlopen(request).read() | ||||
|             login_results = self._download_webpage(request, None, | ||||
|                 note='Logging in', errnote='unable to fetch login page') | ||||
|             if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None: | ||||
|                 self._downloader.report_warning(u'unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.') | ||||
|                 self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.') | ||||
|                 return | ||||
|  | ||||
|             check_form = { | ||||
|                 'fb_dtsg': self._search_regex(r'"fb_dtsg":"(.*?)"', login_results, u'fb_dtsg'), | ||||
|                 'nh': self._search_regex(r'name="nh" value="(\w*?)"', login_results, u'nh'), | ||||
|                 'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'), | ||||
|                 'nh': self._search_regex(r'name="nh" value="(\w*?)"', login_results, 'nh'), | ||||
|                 'name_action_selected': 'dont_save', | ||||
|                 'submit[Continue]': self._search_regex(r'<input value="(.*?)" name="submit\[Continue\]"', login_results, u'continue'), | ||||
|                 'submit[Continue]': self._search_regex(r'<button[^>]+value="(.*?)"[^>]+name="submit\[Continue\]"', login_results, 'continue'), | ||||
|             } | ||||
|             check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, compat_urllib_parse.urlencode(check_form)) | ||||
|             check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form)) | ||||
|             check_req.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|             check_response = compat_urllib_request.urlopen(check_req).read() | ||||
|             check_response = self._download_webpage(check_req, None, | ||||
|                 note='Confirming login') | ||||
|             if re.search(r'id="checkpointSubmitButton"', check_response) is not None: | ||||
|                 self._downloader.report_warning(u'Unable to confirm login, you have to login in your brower and authorize the login.') | ||||
|                 self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.') | ||||
|         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: | ||||
|             self._downloader.report_warning(u'unable to log in: %s' % compat_str(err)) | ||||
|             self._downloader.report_warning('unable to log in: %s' % compat_str(err)) | ||||
|             return | ||||
|  | ||||
|     def _real_initialize(self): | ||||
| @@ -88,9 +95,7 @@ class FacebookIE(InfoExtractor): | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         if mobj is None: | ||||
|             raise ExtractorError(u'Invalid URL: %s' % url) | ||||
|         video_id = mobj.group('ID') | ||||
|         video_id = mobj.group('id') | ||||
|  | ||||
|         url = 'https://www.facebook.com/video/video.php?v=%s' % video_id | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
| @@ -102,10 +107,10 @@ class FacebookIE(InfoExtractor): | ||||
|             m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage) | ||||
|             if m_msg is not None: | ||||
|                 raise ExtractorError( | ||||
|                     u'The video is not available, Facebook said: "%s"' % m_msg.group(1), | ||||
|                     'The video is not available, Facebook said: "%s"' % m_msg.group(1), | ||||
|                     expected=True) | ||||
|             else: | ||||
|                 raise ExtractorError(u'Cannot parse data') | ||||
|                 raise ExtractorError('Cannot parse data') | ||||
|         data = dict(json.loads(m.group(1))) | ||||
|         params_raw = compat_urllib_parse.unquote(data['params']) | ||||
|         params = json.loads(params_raw) | ||||
| @@ -114,19 +119,15 @@ class FacebookIE(InfoExtractor): | ||||
|         if not video_url: | ||||
|             video_url = video_data['sd_src'] | ||||
|         if not video_url: | ||||
|             raise ExtractorError(u'Cannot find video URL') | ||||
|         video_duration = int(video_data['video_duration']) | ||||
|         thumbnail = video_data['thumbnail_src'] | ||||
|             raise ExtractorError('Cannot find video URL') | ||||
|  | ||||
|         video_title = self._html_search_regex( | ||||
|             r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, u'title') | ||||
|             r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title') | ||||
|  | ||||
|         info = { | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'title': video_title, | ||||
|             'url': video_url, | ||||
|             'ext': 'mp4', | ||||
|             'duration': video_duration, | ||||
|             'thumbnail': thumbnail, | ||||
|             'duration': int(video_data['video_duration']), | ||||
|             'thumbnail': video_data['thumbnail_src'], | ||||
|         } | ||||
|         return [info] | ||||
|   | ||||
							
								
								
									
										38
									
								
								youtube_dl/extractor/firstpost.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										38
									
								
								youtube_dl/extractor/firstpost.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,38 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
class FirstpostIE(InfoExtractor):
    """Extractor for video articles on firstpost.com."""
    IE_NAME = 'Firstpost.com'
    _VALID_URL = r'http://(?:www\.)?firstpost\.com/[^/]+/.*-(?P<id>[0-9]+)\.html'

    _TEST = {
        'url': 'http://www.firstpost.com/india/india-to-launch-indigenous-aircraft-carrier-monday-1025403.html',
        'md5': 'ee9114957692f01fb1263ed87039112a',
        'info_dict': {
            'id': '1025403',
            'ext': 'mp4',
            'title': 'India to launch indigenous aircraft carrier INS Vikrant today',
            'description': 'Its flight deck is over twice the size of a football field, its power unit can light up the entire Kochi city and the cabling is enough to cover the distance between here to Delhi.',
        }
    }

    def _real_extract(self, url):
        # The numeric id is the trailing dash-separated URL component.
        video_id = re.match(self._VALID_URL, url).group('id')
        page = self._download_webpage(url, video_id)

        # The direct media URL is stored in the player div's flashvars.
        video_url = self._html_search_regex(
            r'<div.*?name="div_video".*?flashvars="([^"]+)">',
            page, 'video URL')

        return {
            'id': video_id,
            'url': video_url,
            'title': self._og_search_title(page),
            'description': self._og_search_description(page),
            'thumbnail': self._og_search_thumbnail(page),
        }
							
								
								
									
										60
									
								
								youtube_dl/extractor/firsttv.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										60
									
								
								youtube_dl/extractor/firsttv.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,60 @@ | ||||
| # encoding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import int_or_none | ||||
|  | ||||
|  | ||||
class FirstTVIE(InfoExtractor):
    """Extractor for the 1tv.ru video archive (Channel One Russia)."""
    IE_NAME = 'firsttv'
    IE_DESC = 'Видеоархив - Первый канал'
    _VALID_URL = r'http://(?:www\.)?1tv\.ru/videoarchive/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.1tv.ru/videoarchive/73390',
        'md5': '3de6390cf0cca4a5eae1d1d83895e5ad',
        'info_dict': {
            'id': '73390',
            'ext': 'mp4',
            'title': 'Олимпийские канатные дороги',
            'description': 'md5:cc730d2bf4215463e37fff6a1e277b13',
            'thumbnail': 'http://img1.1tv.ru/imgsize640x360/PR20140210114657.JPG',
            'duration': 149,
        },
        'skip': 'Only works from Russia',
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        page = self._download_webpage(url, video_id, 'Downloading page')

        # The media URL lives inside the inline jwplayer setup() call.
        video_url = self._html_search_regex(
            r'''(?s)jwplayer\('flashvideoportal_1'\)\.setup\({.*?'file': '([^']+)'.*?}\);''', page, 'video URL')

        title = self._html_search_regex(
            r'<div class="tv_translation">\s*<h1><a href="[^"]+">([^<]*)</a>', page, 'title')
        description = self._html_search_regex(
            r'<div class="descr">\s*<div> </div>\s*<p>([^<]*)</p></div>', page, 'description', fatal=False)

        # Vote counters are rendered in brackets after the rating labels.
        like_count = self._html_search_regex(
            r'title="Понравилось".*?/></label> \[(\d+)\]',
            page, 'like count', fatal=False)
        dislike_count = self._html_search_regex(
            r'title="Не понравилось".*?/></label> \[(\d+)\]',
            page, 'dislike count', fatal=False)

        duration = self._og_search_property(
            'video:duration', page, 'video duration', fatal=False)

        return {
            'id': video_id,
            'url': video_url,
            'thumbnail': self._og_search_thumbnail(page),
            'title': title,
            'description': description,
            'duration': int_or_none(duration),
            'like_count': int_or_none(like_count),
            'dislike_count': int_or_none(dislike_count),
        }
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| @@ -11,13 +13,13 @@ class FlickrIE(InfoExtractor): | ||||
|     """Information Extractor for Flickr videos""" | ||||
|     _VALID_URL = r'(?:https?://)?(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*' | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/', | ||||
|         u'file': u'5645318632.mp4', | ||||
|         u'md5': u'6fdc01adbc89d72fc9c4f15b4a4ba87b', | ||||
|         u'info_dict': { | ||||
|             u"description": u"Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",  | ||||
|             u"uploader_id": u"forestwander-nature-pictures",  | ||||
|             u"title": u"Dark Hollow Waterfalls" | ||||
|         'url': 'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/', | ||||
|         'file': '5645318632.mp4', | ||||
|         'md5': '6fdc01adbc89d72fc9c4f15b4a4ba87b', | ||||
|         'info_dict': { | ||||
|             "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",  | ||||
|             "uploader_id": "forestwander-nature-pictures",  | ||||
|             "title": "Dark Hollow Waterfalls" | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -29,13 +31,13 @@ class FlickrIE(InfoExtractor): | ||||
|         webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id | ||||
|         webpage = self._download_webpage(webpage_url, video_id) | ||||
|  | ||||
|         secret = self._search_regex(r"photo_secret: '(\w+)'", webpage, u'secret') | ||||
|         secret = self._search_regex(r"photo_secret: '(\w+)'", webpage, 'secret') | ||||
|  | ||||
|         first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=' + video_id + '&secret=' + secret + '&bitrate=700&target=_self' | ||||
|         first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage') | ||||
|  | ||||
|         node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>', | ||||
|             first_xml, u'node_id') | ||||
|             first_xml, 'node_id') | ||||
|  | ||||
|         second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1' | ||||
|         second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage') | ||||
| @@ -44,7 +46,7 @@ class FlickrIE(InfoExtractor): | ||||
|  | ||||
|         mobj = re.search(r'<STREAM APP="(.+?)" FULLPATH="(.+?)"', second_xml) | ||||
|         if mobj is None: | ||||
|             raise ExtractorError(u'Unable to extract video url') | ||||
|             raise ExtractorError('Unable to extract video url') | ||||
|         video_url = mobj.group(1) + unescapeHTML(mobj.group(2)) | ||||
|  | ||||
|         return [{ | ||||
|   | ||||
							
								
								
									
										95
									
								
								youtube_dl/extractor/fourtube.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										95
									
								
								youtube_dl/extractor/fourtube.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,95 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     compat_urllib_request, | ||||
|     unified_strdate, | ||||
|     str_to_int, | ||||
|     parse_duration, | ||||
|     clean_html, | ||||
| ) | ||||
|  | ||||
|  | ||||
class FourTubeIE(InfoExtractor):
    """Extractor for 4tube.com videos.

    Metadata is scraped from an inline JS playlist config plus the page's
    description meta tag; the actual stream URLs are resolved by POSTing an
    empty JSON body to the tkn.4tube.com token service.
    """
    IE_NAME = '4tube'
    _VALID_URL = r'https?://(?:www\.)?4tube\.com/videos/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black',
        'md5': '6516c8ac63b03de06bc8eac14362db4f',
        'info_dict': {
            'id': '209733',
            'ext': 'mp4',
            'title': 'Hot Babe Holly Michaels gets her ass stuffed by black',
            'uploader': 'WCP Club',
            'uploader_id': 'wcp-club',
            'upload_date': '20131031',
            'duration': 583,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        webpage_url = 'http://www.4tube.com/videos/' + video_id
        webpage = self._download_webpage(webpage_url, video_id)

        self.report_extraction(video_id)

        # Inline jwplayer-style config carries the media id, the available
        # source heights, the title and the thumbnail.
        playlist_json = self._html_search_regex(
            r'var playerConfigPlaylist\s+=\s+([^;]+)', webpage, 'Playlist')
        media_id = self._search_regex(r'idMedia:\s*(\d+)', playlist_json, 'Media Id')
        sources = self._search_regex(r'sources:\s*\[([^\]]*)\]', playlist_json, 'Sources').split(',')
        title = self._search_regex(r'title:\s*"([^"]*)', playlist_json, 'Title')
        thumbnail_url = self._search_regex(r'image:\s*"([^"]*)', playlist_json, 'Thumbnail', fatal=False)

        uploader = None
        uploader_id = None
        uploader_str = self._search_regex(
            r'<span>Uploaded by</span>(.*?)<span>', webpage, 'uploader', fatal=False)
        # fatal=False means uploader_str may be None; re.search(pattern, None)
        # would raise TypeError, so guard before matching.
        if uploader_str:
            umobj = re.search(
                r'<a href="/sites/(?P<id>[^"]+)"><strong>(?P<name>[^<]+)</strong></a>',
                uploader_str)
            if umobj:
                uploader, uploader_id = umobj.group('name'), umobj.group('id')
            else:
                uploader = clean_html(uploader_str)

        upload_date = None
        view_count = None
        duration = None
        description = self._html_search_meta('description', webpage, 'description')
        if description:
            upload_date = self._search_regex(
                r'Published Date: (\d{2} [a-zA-Z]{3} \d{4})', description, 'upload date',
                fatal=False)
            if upload_date:
                upload_date = unified_strdate(upload_date)
            view_count = self._search_regex(r'Views: ([\d,\.]+)', description, 'view count', fatal=False)
            if view_count:
                view_count = str_to_int(view_count)
            duration = parse_duration(self._search_regex(r'Length: (\d+m\d+s)', description, 'duration', fatal=False))

        # POST an empty JSON body ('{}') to the token service; the response
        # maps each source height to a tokenized stream URL.
        token_url = "http://tkn.4tube.com/{0}/desktop/{1}".format(media_id, "+".join(sources))
        headers = {
            b'Content-Type': b'application/x-www-form-urlencoded',
            b'Origin': b'http://www.4tube.com',
        }
        token_req = compat_urllib_request.Request(token_url, b'{}', headers)
        tokens = self._download_json(token_req, video_id)

        # `res` is a height string like '360'; don't shadow the builtin `format`.
        formats = [{
            'url': tokens[res]['token'],
            'format_id': res + 'p',
            'resolution': res + 'p',
            'quality': int(res),
        } for res in sources]

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail_url,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'upload_date': upload_date,
            'view_count': view_count,
            'duration': duration,
            'age_limit': 18,
            'webpage_url': webpage_url,
        }
							
								
								
									
										38
									
								
								youtube_dl/extractor/franceinter.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										38
									
								
								youtube_dl/extractor/franceinter.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,38 @@ | ||||
| # coding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
class FranceInterIE(InfoExtractor):
    """Extract the audio stream from a France Inter replay player page."""

    _VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]{6})'
    _TEST = {
        'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
        'file': '793962.mp3',
        'md5': '4764932e466e6f6c79c317d2e74f6884',
        "info_dict": {
            "title": "L’Histoire dans les jeux vidéo",
        },
    }

    def _real_extract(self, url):
        audio_id = re.match(self._VALID_URL, url).group('id')

        webpage = self._download_webpage(url, audio_id)

        # The show title sits inside a scrolling-overflow span in the header.
        title = self._html_search_regex(
            r'<span class="roll_overflow">(.*?)</span></h1>', webpage, 'title')
        # The relative AOD path is embedded in the player's query string.
        path = self._search_regex(
            r'&urlAOD=(.*?)&startTime', webpage, 'video url')

        return {
            'id': audio_id,
            'title': title,
            'formats': [{
                'url': 'http://www.franceinter.fr/' + path,
                # audio-only stream
                'vcodec': 'none',
            }],
        }
| @@ -1,4 +1,7 @@ | ||||
| # encoding: utf-8 | ||||
|  | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
|  | ||||
| @@ -30,7 +33,7 @@ class FranceTVBaseInfoExtractor(InfoExtractor): | ||||
|  | ||||
|  | ||||
| class PluzzIE(FranceTVBaseInfoExtractor): | ||||
|     IE_NAME = u'pluzz.francetv.fr' | ||||
|     IE_NAME = 'pluzz.francetv.fr' | ||||
|     _VALID_URL = r'https?://pluzz\.francetv\.fr/videos/(.*?)\.html' | ||||
|  | ||||
|     # Can't use tests, videos expire in 7 days | ||||
| @@ -44,17 +47,17 @@ class PluzzIE(FranceTVBaseInfoExtractor): | ||||
|  | ||||
|  | ||||
| class FranceTvInfoIE(FranceTVBaseInfoExtractor): | ||||
|     IE_NAME = u'francetvinfo.fr' | ||||
|     IE_NAME = 'francetvinfo.fr' | ||||
|     _VALID_URL = r'https?://www\.francetvinfo\.fr/replay.*/(?P<title>.+)\.html' | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html', | ||||
|         u'file': u'84981923.mp4', | ||||
|         u'info_dict': { | ||||
|             u'title': u'Soir 3', | ||||
|         'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html', | ||||
|         'file': '84981923.mp4', | ||||
|         'info_dict': { | ||||
|             'title': 'Soir 3', | ||||
|         }, | ||||
|         u'params': { | ||||
|             u'skip_download': True, | ||||
|         'params': { | ||||
|             'skip_download': True, | ||||
|         }, | ||||
|     } | ||||
|  | ||||
| @@ -62,13 +65,13 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         page_title = mobj.group('title') | ||||
|         webpage = self._download_webpage(url, page_title) | ||||
|         video_id = self._search_regex(r'id-video=(\d+?)"', webpage, u'video id') | ||||
|         video_id = self._search_regex(r'id-video=(\d+?)[@"]', webpage, 'video id') | ||||
|         return self._extract_video(video_id) | ||||
|  | ||||
|  | ||||
| class FranceTVIE(FranceTVBaseInfoExtractor): | ||||
|     IE_NAME = u'francetv' | ||||
|     IE_DESC = u'France 2, 3, 4, 5 and Ô' | ||||
|     IE_NAME = 'francetv' | ||||
|     IE_DESC = 'France 2, 3, 4, 5 and Ô' | ||||
|     _VALID_URL = r'''(?x)https?://www\.france[2345o]\.fr/ | ||||
|         (?: | ||||
|             emissions/.*?/(videos|emissions)/(?P<id>[^/?]+) | ||||
| @@ -78,73 +81,73 @@ class FranceTVIE(FranceTVBaseInfoExtractor): | ||||
|     _TESTS = [ | ||||
|         # france2 | ||||
|         { | ||||
|             u'url': u'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104', | ||||
|             u'file': u'75540104.mp4', | ||||
|             u'info_dict': { | ||||
|                 u'title': u'13h15, le samedi...', | ||||
|                 u'description': u'md5:2e5b58ba7a2d3692b35c792be081a03d', | ||||
|             'url': 'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104', | ||||
|             'file': '75540104.mp4', | ||||
|             'info_dict': { | ||||
|                 'title': '13h15, le samedi...', | ||||
|                 'description': 'md5:2e5b58ba7a2d3692b35c792be081a03d', | ||||
|             }, | ||||
|             u'params': { | ||||
|             'params': { | ||||
|                 # m3u8 download | ||||
|                 u'skip_download': True, | ||||
|                 'skip_download': True, | ||||
|             }, | ||||
|         }, | ||||
|         # france3 | ||||
|         { | ||||
|             u'url': u'http://www.france3.fr/emissions/pieces-a-conviction/diffusions/13-11-2013_145575', | ||||
|             u'info_dict': { | ||||
|                 u'id': u'000702326_CAPP_PicesconvictionExtrait313022013_120220131722_Au', | ||||
|                 u'ext': u'flv', | ||||
|                 u'title': u'Le scandale du prix des médicaments', | ||||
|                 u'description': u'md5:1384089fbee2f04fc6c9de025ee2e9ce', | ||||
|             'url': 'http://www.france3.fr/emissions/pieces-a-conviction/diffusions/13-11-2013_145575', | ||||
|             'info_dict': { | ||||
|                 'id': '000702326_CAPP_PicesconvictionExtrait313022013_120220131722_Au', | ||||
|                 'ext': 'flv', | ||||
|                 'title': 'Le scandale du prix des médicaments', | ||||
|                 'description': 'md5:1384089fbee2f04fc6c9de025ee2e9ce', | ||||
|             }, | ||||
|             u'params': { | ||||
|             'params': { | ||||
|                 # rtmp download | ||||
|                 u'skip_download': True, | ||||
|                 'skip_download': True, | ||||
|             }, | ||||
|         }, | ||||
|         # france4 | ||||
|         { | ||||
|             u'url': u'http://www.france4.fr/emissions/hero-corp/videos/rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4', | ||||
|             u'info_dict': { | ||||
|                 u'id': u'rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4', | ||||
|                 u'ext': u'flv', | ||||
|                 u'title': u'Hero Corp Making of - Extrait 1', | ||||
|                 u'description': u'md5:c87d54871b1790679aec1197e73d650a', | ||||
|             'url': 'http://www.france4.fr/emissions/hero-corp/videos/rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4', | ||||
|             'info_dict': { | ||||
|                 'id': 'rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4', | ||||
|                 'ext': 'flv', | ||||
|                 'title': 'Hero Corp Making of - Extrait 1', | ||||
|                 'description': 'md5:c87d54871b1790679aec1197e73d650a', | ||||
|             }, | ||||
|             u'params': { | ||||
|             'params': { | ||||
|                 # rtmp download | ||||
|                 u'skip_download': True, | ||||
|                 'skip_download': True, | ||||
|             }, | ||||
|         }, | ||||
|         # france5 | ||||
|         { | ||||
|             u'url': u'http://www.france5.fr/emissions/c-a-dire/videos/92837968', | ||||
|             u'info_dict': { | ||||
|                 u'id': u'92837968', | ||||
|                 u'ext': u'mp4', | ||||
|                 u'title': u'C à dire ?!', | ||||
|                 u'description': u'md5:fb1db1cbad784dcce7c7a7bd177c8e2f', | ||||
|             'url': 'http://www.france5.fr/emissions/c-a-dire/videos/92837968', | ||||
|             'info_dict': { | ||||
|                 'id': '92837968', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'C à dire ?!', | ||||
|                 'description': 'md5:fb1db1cbad784dcce7c7a7bd177c8e2f', | ||||
|             }, | ||||
|             u'params': { | ||||
|             'params': { | ||||
|                 # m3u8 download | ||||
|                 u'skip_download': True, | ||||
|                 'skip_download': True, | ||||
|             }, | ||||
|         }, | ||||
|         # franceo | ||||
|         { | ||||
|             u'url': u'http://www.franceo.fr/jt/info-afrique/04-12-2013', | ||||
|             u'info_dict': { | ||||
|                 u'id': u'92327925', | ||||
|                 u'ext': u'mp4', | ||||
|                 u'title': u'Infô-Afrique', | ||||
|                 u'description': u'md5:ebf346da789428841bee0fd2a935ea55', | ||||
|             'url': 'http://www.franceo.fr/jt/info-afrique/04-12-2013', | ||||
|             'info_dict': { | ||||
|                 'id': '92327925', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'Infô-Afrique', | ||||
|                 'description': 'md5:ebf346da789428841bee0fd2a935ea55', | ||||
|             }, | ||||
|             u'params': { | ||||
|             'params': { | ||||
|                 # m3u8 download | ||||
|                 u'skip_download': True, | ||||
|                 'skip_download': True, | ||||
|             }, | ||||
|             u'skip': u'The id changes frequently', | ||||
|             'skip': 'The id changes frequently', | ||||
|         }, | ||||
|     ] | ||||
|  | ||||
| @@ -160,27 +163,28 @@ class FranceTVIE(FranceTVBaseInfoExtractor): | ||||
|                  '\.fr/\?id-video=([^"/&]+)'), | ||||
|                 (r'<a class="video" id="ftv_player_(.+?)"'), | ||||
|             ] | ||||
|             video_id = self._html_search_regex(id_res, webpage, u'video ID') | ||||
|             video_id = self._html_search_regex(id_res, webpage, 'video ID') | ||||
|         else: | ||||
|             video_id = mobj.group('id') | ||||
|         return self._extract_video(video_id) | ||||
|  | ||||
|  | ||||
| class GenerationQuoiIE(InfoExtractor): | ||||
|     IE_NAME = u'france2.fr:generation-quoi' | ||||
|     IE_NAME = 'france2.fr:generation-quoi' | ||||
|     _VALID_URL = r'https?://generation-quoi\.france2\.fr/portrait/(?P<name>.*)(\?|$)' | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': u'http://generation-quoi.france2.fr/portrait/garde-a-vous', | ||||
|         u'file': u'k7FJX8VBcvvLmX4wA5Q.mp4', | ||||
|         u'info_dict': { | ||||
|             u'title': u'Génération Quoi - Garde à Vous', | ||||
|             u'uploader': u'Génération Quoi', | ||||
|         'url': 'http://generation-quoi.france2.fr/portrait/garde-a-vous', | ||||
|         'file': 'k7FJX8VBcvvLmX4wA5Q.mp4', | ||||
|         'info_dict': { | ||||
|             'title': 'Génération Quoi - Garde à Vous', | ||||
|             'uploader': 'Génération Quoi', | ||||
|         }, | ||||
|         u'params': { | ||||
|         'params': { | ||||
|             # It uses Dailymotion | ||||
|             u'skip_download': True, | ||||
|             'skip_download': True, | ||||
|         }, | ||||
|         'skip': 'Only available from France', | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
| @@ -191,3 +195,29 @@ class GenerationQuoiIE(InfoExtractor): | ||||
|         info = json.loads(info_json) | ||||
|         return self.url_result('http://www.dailymotion.com/video/%s' % info['id'], | ||||
|             ie='Dailymotion') | ||||
|  | ||||
|  | ||||
class CultureboxIE(FranceTVBaseInfoExtractor):
    """Resolve a Culturebox page to its underlying francetv video."""

    IE_NAME = 'culturebox.francetvinfo.fr'
    _VALID_URL = r'https?://culturebox\.francetvinfo\.fr/(?P<name>.*?)(\?|$)'

    _TEST = {
        'url': 'http://culturebox.francetvinfo.fr/einstein-on-the-beach-au-theatre-du-chatelet-146813',
        'info_dict': {
            'id': 'EV_6785',
            'ext': 'mp4',
            'title': 'Einstein on the beach au Théâtre du Châtelet',
            'description': 'md5:9ce2888b1efefc617b5e58b3f6200eeb',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        display_name = re.match(self._VALID_URL, url).group('name')
        page = self._download_webpage(url, display_name)
        # The embedded player URL carries the real francetv video id.
        video_id = self._search_regex(
            r'"http://videos\.francetv\.fr/video/(.*?)"', page, 'video id')
        return self._extract_video(video_id)
|   | ||||
| @@ -1,18 +1,21 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import determine_ext | ||||
|  | ||||
|  | ||||
| class FreesoundIE(InfoExtractor): | ||||
|     _VALID_URL = r'(?:https?://)?(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)' | ||||
|     _VALID_URL = r'https?://(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)' | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.freesound.org/people/miklovan/sounds/194503/', | ||||
|         u'file': u'194503.mp3', | ||||
|         u'md5': u'12280ceb42c81f19a515c745eae07650', | ||||
|         u'info_dict': { | ||||
|             u"title": u"gulls in the city.wav", | ||||
|             u"uploader" : u"miklovan", | ||||
|             u'description': u'the sounds of seagulls in the city', | ||||
|         'url': 'http://www.freesound.org/people/miklovan/sounds/194503/', | ||||
|         'md5': '12280ceb42c81f19a515c745eae07650', | ||||
|         'info_dict': { | ||||
|             'id': '194503', | ||||
|             'ext': 'mp3', | ||||
|             'title': 'gulls in the city.wav', | ||||
|             'uploader': 'miklovan', | ||||
|             'description': 'the sounds of seagulls in the city', | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -20,17 +23,17 @@ class FreesoundIE(InfoExtractor): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         music_id = mobj.group('id') | ||||
|         webpage = self._download_webpage(url, music_id) | ||||
|         title = self._html_search_regex(r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>', | ||||
|                                 webpage, 'music title', flags=re.DOTALL) | ||||
|         music_url = self._og_search_property('audio', webpage, 'music url') | ||||
|         description = self._html_search_regex(r'<div id="sound_description">(.*?)</div>', | ||||
|                                 webpage, 'description', fatal=False, flags=re.DOTALL) | ||||
|         title = self._html_search_regex( | ||||
|             r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>', | ||||
|             webpage, 'music title', flags=re.DOTALL) | ||||
|         description = self._html_search_regex( | ||||
|             r'<div id="sound_description">(.*?)</div>', webpage, 'description', | ||||
|             fatal=False, flags=re.DOTALL) | ||||
|  | ||||
|         return [{ | ||||
|             'id':       music_id, | ||||
|             'title':    title,             | ||||
|             'url':      music_url, | ||||
|         return { | ||||
|             'id': music_id, | ||||
|             'title': title, | ||||
|             'url': self._og_search_property('audio', webpage, 'music url'), | ||||
|             'uploader': self._og_search_property('audio:artist', webpage, 'music uploader'), | ||||
|             'ext':      determine_ext(music_url), | ||||
|             'description': description, | ||||
|         }] | ||||
|         } | ||||
|   | ||||
							
								
								
									
										37
									
								
								youtube_dl/extractor/freespeech.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										37
									
								
								youtube_dl/extractor/freespeech.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,37 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
class FreespeechIE(InfoExtractor):
    """Resolve a freespeech.org article to its embedded YouTube video."""

    IE_NAME = 'freespeech.org'
    _VALID_URL = r'https://www\.freespeech\.org/video/(?P<title>.+)'
    _TEST = {
        'add_ie': ['Youtube'],
        'url': 'https://www.freespeech.org/video/obama-romney-campaign-colorado-ahead-debate-0',
        'info_dict': {
            'id': 'poKsVCZ64uU',
            'ext': 'mp4',
            'title': 'Obama, Romney Campaign in Colorado Ahead of Debate',
            'description': 'Obama, Romney Campaign in Colorado Ahead of Debate',
            'uploader': 'freespeechtv',
            'uploader_id': 'freespeechtv',
            'upload_date': '20121002',
        },
    }

    def _real_extract(self, url):
        page_title = re.match(self._VALID_URL, url).group('title')
        webpage = self._download_webpage(url, page_title)

        # Drupal exposes the player settings as a JSON blob in the page;
        # the YouTube URL lives under the jw_player node.
        settings = json.loads(self._search_regex(
            r'jQuery.extend\(Drupal.settings, ({.*?})\);', webpage, 'info'))

        return {
            '_type': 'url',
            'ie_key': 'Youtube',
            'url': settings['jw_player']['basic_video_node_player']['file'],
        }
| @@ -1,18 +1,24 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import json | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
| class FunnyOrDieIE(InfoExtractor): | ||||
|     _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$' | ||||
|     _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])' | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version', | ||||
|         u'file': u'0732f586d7.mp4', | ||||
|         u'md5': u'f647e9e90064b53b6e046e75d0241fbd', | ||||
|         u'info_dict': { | ||||
|             u"description": u"Lyrics changed to match the video. Spoken cameo by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a concept by Dustin McLean (DustFilms.com). Performed, edited, and written by David A. Scott.",  | ||||
|             u"title": u"Heart-Shaped Box: Literal Video Version" | ||||
|         } | ||||
|         'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version', | ||||
|         'file': '0732f586d7.mp4', | ||||
|         'md5': 'f647e9e90064b53b6e046e75d0241fbd', | ||||
|         'info_dict': { | ||||
|             'description': ('Lyrics changed to match the video. Spoken cameo ' | ||||
|                 'by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a ' | ||||
|                 'concept by Dustin McLean (DustFilms.com). Performed, edited, ' | ||||
|                 'and written by David A. Scott.'), | ||||
|             'title': 'Heart-Shaped Box: Literal Video Version', | ||||
|         }, | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
| @@ -23,13 +29,25 @@ class FunnyOrDieIE(InfoExtractor): | ||||
|  | ||||
|         video_url = self._search_regex( | ||||
|             [r'type="video/mp4" src="(.*?)"', r'src="([^>]*?)" type=\'video/mp4\''], | ||||
|             webpage, u'video URL', flags=re.DOTALL) | ||||
|             webpage, 'video URL', flags=re.DOTALL) | ||||
|  | ||||
|         info = { | ||||
|         if mobj.group('type') == 'embed': | ||||
|             post_json = self._search_regex( | ||||
|                 r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details') | ||||
|             post = json.loads(post_json) | ||||
|             title = post['name'] | ||||
|             description = post.get('description') | ||||
|             thumbnail = post.get('picture') | ||||
|         else: | ||||
|             title = self._og_search_title(webpage) | ||||
|             description = self._og_search_description(webpage) | ||||
|             thumbnail = None | ||||
|  | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'url': video_url, | ||||
|             'ext': 'mp4', | ||||
|             'title': self._og_search_title(webpage), | ||||
|             'description': self._og_search_description(webpage), | ||||
|             'title': title, | ||||
|             'description': description, | ||||
|             'thumbnail': thumbnail, | ||||
|         } | ||||
|         return [info] | ||||
|   | ||||
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| @@ -6,13 +8,14 @@ from .common import InfoExtractor | ||||
| class GamekingsIE(InfoExtractor): | ||||
|     _VALID_URL = r'http://www\.gamekings\.tv/videos/(?P<name>[0-9a-z\-]+)' | ||||
|     _TEST = { | ||||
|         u"url": u"http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/", | ||||
|         u'file': u'20130811.mp4', | ||||
|         'url': 'http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/', | ||||
|         # MD5 is flaky, seems to change regularly | ||||
|         #u'md5': u'2f32b1f7b80fdc5cb616efb4f387f8a3', | ||||
|         # 'md5': '2f32b1f7b80fdc5cb616efb4f387f8a3', | ||||
|         u'info_dict': { | ||||
|             u"title": u"Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review", | ||||
|             u"description": u"Melle en Steven hebben voor de review een week in de rechtbank doorbracht met Phoenix Wright: Ace Attorney - Dual Destinies.", | ||||
|             'id': '20130811', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review', | ||||
|             'description': 'md5:632e61a9f97d700e83f43d77ddafb6a4', | ||||
|         } | ||||
|     } | ||||
|  | ||||
|   | ||||
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
|  | ||||
| @@ -13,12 +15,12 @@ from ..utils import ( | ||||
| class GameSpotIE(InfoExtractor): | ||||
|     _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?' | ||||
|     _TEST = { | ||||
|         u"url": u"http://www.gamespot.com/arma-iii/videos/arma-iii-community-guide-sitrep-i-6410818/", | ||||
|         u"file": u"gs-2300-6410818.mp4", | ||||
|         u"md5": u"b2a30deaa8654fcccd43713a6b6a4825", | ||||
|         u"info_dict": { | ||||
|             u"title": u"Arma 3 - Community Guide: SITREP I", | ||||
|             u'description': u'Check out this video where some of the basics of Arma 3 is explained.', | ||||
|         "url": "http://www.gamespot.com/arma-iii/videos/arma-iii-community-guide-sitrep-i-6410818/", | ||||
|         "file": "gs-2300-6410818.mp4", | ||||
|         "md5": "b2a30deaa8654fcccd43713a6b6a4825", | ||||
|         "info_dict": { | ||||
|             "title": "Arma 3 - Community Guide: SITREP I", | ||||
|             'description': 'Check out this video where some of the basics of Arma 3 is explained.', | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -26,7 +28,7 @@ class GameSpotIE(InfoExtractor): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         page_id = mobj.group('page_id') | ||||
|         webpage = self._download_webpage(url, page_id) | ||||
|         data_video_json = self._search_regex(r'data-video=\'(.*?)\'', webpage, u'data video') | ||||
|         data_video_json = self._search_regex(r'data-video=["\'](.*?)["\']', webpage, 'data video') | ||||
|         data_video = json.loads(unescapeHTML(data_video_json)) | ||||
|  | ||||
|         # Transform the manifest url to a link to the mp4 files | ||||
| @@ -34,7 +36,7 @@ class GameSpotIE(InfoExtractor): | ||||
|         f4m_url = data_video['videoStreams']['f4m_stream'] | ||||
|         f4m_path = compat_urlparse.urlparse(f4m_url).path | ||||
|         QUALITIES_RE = r'((,\d+)+,?)' | ||||
|         qualities = self._search_regex(QUALITIES_RE, f4m_path, u'qualities').strip(',').split(',') | ||||
|         qualities = self._search_regex(QUALITIES_RE, f4m_path, 'qualities').strip(',').split(',') | ||||
|         http_path = f4m_path[1:].split('/', 1)[1] | ||||
|         http_template = re.sub(QUALITIES_RE, r'%s', http_path) | ||||
|         http_template = http_template.replace('.csmil/manifest.f4m', '') | ||||
|   | ||||
| @@ -1,4 +1,4 @@ | ||||
| import re | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .mtv import MTVServicesInfoExtractor | ||||
|  | ||||
| @@ -6,22 +6,14 @@ from .mtv import MTVServicesInfoExtractor | ||||
| class GametrailersIE(MTVServicesInfoExtractor): | ||||
|     _VALID_URL = r'http://www\.gametrailers\.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)' | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer', | ||||
|         u'file': u'70e9a5d7-cf25-4a10-9104-6f3e7342ae0d.mp4', | ||||
|         u'md5': u'4c8e67681a0ea7ec241e8c09b3ea8cf7', | ||||
|         u'info_dict': { | ||||
|             u'title': u'E3 2013: Debut Trailer', | ||||
|             u'description': u'Faith is back!  Check out the World Premiere trailer for Mirror\'s Edge 2 straight from the EA Press Conference at E3 2013!', | ||||
|         'url': 'http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer', | ||||
|         'md5': '4c8e67681a0ea7ec241e8c09b3ea8cf7', | ||||
|         'info_dict': { | ||||
|             'id': '70e9a5d7-cf25-4a10-9104-6f3e7342ae0d', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'E3 2013: Debut Trailer', | ||||
|             'description': 'Faith is back!  Check out the World Premiere trailer for Mirror\'s Edge 2 straight from the EA Press Conference at E3 2013!', | ||||
|         }, | ||||
|     } | ||||
|  | ||||
|     _FEED_URL = 'http://www.gametrailers.com/feeds/mrss' | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         video_id = mobj.group('id') | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|         mgid = self._search_regex([r'data-video="(?P<mgid>mgid:.*?)"', | ||||
|                                    r'data-contentId=\'(?P<mgid>mgid:.*?)\''], | ||||
|                                   webpage, u'mgid') | ||||
|         return self._get_videos_info(mgid) | ||||
|   | ||||
							
								
								
									
										134
									
								
								youtube_dl/extractor/gdcvault.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										134
									
								
								youtube_dl/extractor/gdcvault.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,134 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_request, | ||||
| ) | ||||
|  | ||||
class GDCVaultIE(InfoExtractor):
    """Extractor for talks hosted on gdcvault.com (Game Developers Conference).

    Videos come in two flavors, distinguished by the XML description the
    player iframe points at: direct-HTTP mp4 streams (newer talks) and
    dual rtmp flv streams — one for the slide deck, one for the speaker
    (older talks). Some talks additionally require a logged-in account.
    """
    _VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)/(?P<name>(\w|-)+)'
    _TESTS = [
        {
            'url': 'http://www.gdcvault.com/play/1019721/Doki-Doki-Universe-Sweet-Simple',
            'md5': '7ce8388f544c88b7ac11c7ab1b593704',
            'info_dict': {
                'id': '1019721',
                'ext': 'mp4',
                'title': 'Doki-Doki Universe: Sweet, Simple and Genuine (GDC Next 10)'
            }
        },
        {
            'url': 'http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of',
            'info_dict': {
                'id': '1015683',
                'ext': 'flv',
                'title': 'Embracing the Dark Art of Mathematical Modeling in AI'
            },
            'params': {
                'skip_download': True,  # Requires rtmpdump
            }
        },
    ]

    def _parse_mp4(self, xml_description):
        """Build the format list for direct-HTTP mp4 talks.

        Returns a list of format dicts, or None when the description has
        no <mp4video> node (i.e. the talk is rtmp/flv only).
        """
        mp4_video = xml_description.find('./metadata/mp4video')
        if mp4_video is None:
            return None

        # The <mp4video> text carries a full URL; only its server root is
        # needed, since each MBRVideo stream name is a relative 'mp4:' path.
        mobj = re.match(r'(?P<root>https?://.*?/).*', mp4_video.text)
        video_root = mobj.group('root')

        video_formats = []
        # 'fmt' instead of 'format': don't shadow the builtin.
        for fmt in xml_description.findall('./metadata/MBRVideos/MBRVideo'):
            mobj = re.match(r'mp4\:(?P<path>.*)', fmt.find('streamName').text)
            url = video_root + mobj.group('path')
            vbr = fmt.find('bitrate').text
            video_formats.append({
                'url': url,
                'vbr': int(vbr),
            })
        return video_formats

    def _parse_flv(self, xml_description):
        """Build the two rtmp formats (slides + speaker) for older talks."""
        akamai_host = xml_description.find('./metadata/akamaiHost').text
        slide_video_path = xml_description.find('./metadata/slideVideo').text
        speaker_video_path = xml_description.find('./metadata/speakerVideo').text
        # Speaker video ranks above the slide deck (less negative preference).
        return [
            {
                'url': 'rtmp://' + akamai_host + '/' + slide_video_path,
                'format_note': 'slide deck video',
                'quality': -2,
                'preference': -2,
                'format_id': 'slides',
            },
            {
                'url': 'rtmp://' + akamai_host + '/' + speaker_video_path,
                'format_note': 'speaker video',
                'quality': -1,
                'preference': -1,
                'format_id': 'speaker',
            },
        ]

    def _login(self, webpage_url, video_id):
        """Log in, re-fetch the talk page authenticated, then log out.

        Returns the authenticated page HTML, or None when no credentials
        were supplied (a warning is emitted in that case).
        """
        (username, password) = self._get_login_info()
        if username is None or password is None:
            self.report_warning('It looks like ' + webpage_url + ' requires a login. Try specifying a username and password and try again.')
            return None

        mobj = re.match(r'(?P<root_url>https?://.*?/).*', webpage_url)
        login_url = mobj.group('root_url') + 'api/login.php'
        logout_url = mobj.group('root_url') + 'logout'

        login_form = {
            'email': username,
            'password': password,
        }

        request = compat_urllib_request.Request(login_url, compat_urllib_parse.urlencode(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        self._download_webpage(request, video_id, 'Logging in')
        start_page = self._download_webpage(webpage_url, video_id, 'Getting authenticated video page')
        # Log out again so we don't hold a session open.
        self._download_webpage(logout_url, video_id, 'Logging out')

        return start_page

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        webpage_url = 'http://www.gdcvault.com/play/' + video_id
        start_page = self._download_webpage(webpage_url, video_id)

        xml_root = self._html_search_regex(
            r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>',
            start_page, 'xml root', None, False)
        if xml_root is None:
            # No player iframe on the anonymous page: probably need to
            # authenticate first.
            login_page = self._login(webpage_url, video_id)
            if login_page is None:
                # BUG FIX: previously start_page itself was overwritten with
                # None here, making the xml-filename search below crash with
                # a TypeError. Keep the anonymous page so the code can still
                # fail with a proper extractor error message.
                self.report_warning('Could not login.')
            else:
                # Grab the url from the authenticated page.
                start_page = login_page
                xml_root = self._html_search_regex(
                    r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>',
                    start_page, 'xml root')
        if xml_root is None:
            # Explicit error instead of a TypeError on 'xml_root + ...' below.
            raise ExtractorError('Could not find the XML root for this video')

        xml_name = self._html_search_regex(
            r'<iframe src=".*?\?xml=(?P<xml_file>.+?\.xml).*?".*?</iframe>',
            start_page, 'xml filename', None, False)
        if xml_name is None:
            # Fallback to the older format
            xml_name = self._html_search_regex(
                r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>',
                start_page, 'xml filename')

        xml_description_url = xml_root + 'xml/' + xml_name
        xml_description = self._download_xml(xml_description_url, video_id)

        video_title = xml_description.find('./metadata/title').text
        video_formats = self._parse_mp4(xml_description)
        if video_formats is None:
            # No HTTP mp4 streams available: fall back to the rtmp pair.
            video_formats = self._parse_flv(xml_description)

        return {
            'id': video_id,
            'title': video_title,
            'formats': video_formats,
        }
| @@ -1,17 +1,22 @@ | ||||
| # encoding: utf-8 | ||||
|  | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import os | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from .youtube import YoutubeIE | ||||
| from ..utils import ( | ||||
|     compat_urllib_error, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_request, | ||||
|     compat_urlparse, | ||||
|     compat_xml_parse_error, | ||||
|  | ||||
|     ExtractorError, | ||||
|     HEADRequest, | ||||
|     parse_xml, | ||||
|     smuggle_url, | ||||
|     unescapeHTML, | ||||
|     unified_strdate, | ||||
| @@ -19,94 +24,165 @@ from ..utils import ( | ||||
| ) | ||||
| from .brightcove import BrightcoveIE | ||||
| from .ooyala import OoyalaIE | ||||
| from .rutv import RUTVIE | ||||
|  | ||||
|  | ||||
| class GenericIE(InfoExtractor): | ||||
|     IE_DESC = u'Generic downloader that works on some sites' | ||||
|     IE_DESC = 'Generic downloader that works on some sites' | ||||
|     _VALID_URL = r'.*' | ||||
|     IE_NAME = u'generic' | ||||
|     IE_NAME = 'generic' | ||||
|     _TESTS = [ | ||||
|         { | ||||
|             u'url': u'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html', | ||||
|             u'file': u'13601338388002.mp4', | ||||
|             u'md5': u'6e15c93721d7ec9e9ca3fdbf07982cfd', | ||||
|             u'info_dict': { | ||||
|                 u"uploader": u"www.hodiho.fr", | ||||
|                 u"title": u"R\u00e9gis plante sa Jeep" | ||||
|             } | ||||
|         }, | ||||
|         # embedded vimeo video | ||||
|         { | ||||
|             u'add_ie': ['Vimeo'], | ||||
|             u'url': u'http://skillsmatter.com/podcast/home/move-semanticsperfect-forwarding-and-rvalue-references', | ||||
|             u'file': u'22444065.mp4', | ||||
|             u'md5': u'2903896e23df39722c33f015af0666e2', | ||||
|             u'info_dict': { | ||||
|                 u'title': u'ACCU 2011: Move Semantics,Perfect Forwarding, and Rvalue references- Scott Meyers- 13/04/2011', | ||||
|                 u"uploader_id": u"skillsmatter", | ||||
|                 u"uploader": u"Skills Matter", | ||||
|             'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html', | ||||
|             'file': '13601338388002.mp4', | ||||
|             'md5': '6e15c93721d7ec9e9ca3fdbf07982cfd', | ||||
|             'info_dict': { | ||||
|                 'uploader': 'www.hodiho.fr', | ||||
|                 'title': 'R\u00e9gis plante sa Jeep', | ||||
|             } | ||||
|         }, | ||||
|         # bandcamp page with custom domain | ||||
|         { | ||||
|             u'add_ie': ['Bandcamp'], | ||||
|             u'url': u'http://bronyrock.com/track/the-pony-mash', | ||||
|             u'file': u'3235767654.mp3', | ||||
|             u'info_dict': { | ||||
|                 u'title': u'The Pony Mash', | ||||
|                 u'uploader': u'M_Pallante', | ||||
|             'add_ie': ['Bandcamp'], | ||||
|             'url': 'http://bronyrock.com/track/the-pony-mash', | ||||
|             'file': '3235767654.mp3', | ||||
|             'info_dict': { | ||||
|                 'title': 'The Pony Mash', | ||||
|                 'uploader': 'M_Pallante', | ||||
|             }, | ||||
|             u'skip': u'There is a limit of 200 free downloads / month for the test song', | ||||
|             'skip': 'There is a limit of 200 free downloads / month for the test song', | ||||
|         }, | ||||
|         # embedded brightcove video | ||||
|         # it also tests brightcove videos that need to set the 'Referer' in the | ||||
|         # http requests | ||||
|         { | ||||
|             u'add_ie': ['Brightcove'], | ||||
|             u'url': u'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/', | ||||
|             u'info_dict': { | ||||
|                 u'id': u'2765128793001', | ||||
|                 u'ext': u'mp4', | ||||
|                 u'title': u'Le cours de bourse : l’analyse technique', | ||||
|                 u'description': u'md5:7e9ad046e968cb2d1114004aba466fd9', | ||||
|                 u'uploader': u'BFM BUSINESS', | ||||
|             'add_ie': ['Brightcove'], | ||||
|             'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/', | ||||
|             'info_dict': { | ||||
|                 'id': '2765128793001', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'Le cours de bourse : l’analyse technique', | ||||
|                 'description': 'md5:7e9ad046e968cb2d1114004aba466fd9', | ||||
|                 'uploader': 'BFM BUSINESS', | ||||
|             }, | ||||
|             u'params': { | ||||
|                 u'skip_download': True, | ||||
|             'params': { | ||||
|                 'skip_download': True, | ||||
|             }, | ||||
|         }, | ||||
|         { | ||||
|             # https://github.com/rg3/youtube-dl/issues/2253 | ||||
|             'url': 'http://bcove.me/i6nfkrc3', | ||||
|             'file': '3101154703001.mp4', | ||||
|             'md5': '0ba9446db037002366bab3b3eb30c88c', | ||||
|             'info_dict': { | ||||
|                 'title': 'Still no power', | ||||
|                 'uploader': 'thestar.com', | ||||
|                 'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.', | ||||
|             }, | ||||
|             'add_ie': ['Brightcove'], | ||||
|         }, | ||||
|         # Direct link to a video | ||||
|         { | ||||
|             u'url': u'http://media.w3.org/2010/05/sintel/trailer.mp4', | ||||
|             u'file': u'trailer.mp4', | ||||
|             u'md5': u'67d406c2bcb6af27fa886f31aa934bbe', | ||||
|             u'info_dict': { | ||||
|                 u'id': u'trailer', | ||||
|                 u'title': u'trailer', | ||||
|                 u'upload_date': u'20100513', | ||||
|             'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4', | ||||
|             'md5': '67d406c2bcb6af27fa886f31aa934bbe', | ||||
|             'info_dict': { | ||||
|                 'id': 'trailer', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'trailer', | ||||
|                 'upload_date': '20100513', | ||||
|             } | ||||
|         }, | ||||
|         # ooyala video | ||||
|         { | ||||
|             u'url': u'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219', | ||||
|             u'md5': u'5644c6ca5d5782c1d0d350dad9bd840c', | ||||
|             u'info_dict': { | ||||
|                 u'id': u'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ', | ||||
|                 u'ext': u'mp4', | ||||
|                 u'title': u'2cc213299525360.mov', #that's what we get | ||||
|             'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219', | ||||
|             'md5': '5644c6ca5d5782c1d0d350dad9bd840c', | ||||
|             'info_dict': { | ||||
|                 'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': '2cc213299525360.mov',  # that's what we get | ||||
|             }, | ||||
|         }, | ||||
|         # google redirect | ||||
|         { | ||||
|             'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE', | ||||
|             'info_dict': { | ||||
|                 'id': 'cmQHVoWB5FY', | ||||
|                 'ext': 'mp4', | ||||
|                 'upload_date': '20130224', | ||||
|                 'uploader_id': 'TheVerge', | ||||
|                 'description': 'Chris Ziegler takes a look at the Alcatel OneTouch Fire and the ZTE Open; two of the first Firefox OS handsets to be officially announced.', | ||||
|                 'uploader': 'The Verge', | ||||
|                 'title': 'First Firefox OS phones side-by-side', | ||||
|             }, | ||||
|             'params': { | ||||
|                 'skip_download': False, | ||||
|             } | ||||
|         }, | ||||
|         # embed.ly video | ||||
|         { | ||||
|             'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/', | ||||
|             'info_dict': { | ||||
|                 'id': '9ODmcdjQcHQ', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'Tested: Grinding Coffee at 2000 Frames Per Second', | ||||
|                 'upload_date': '20140225', | ||||
|                 'description': 'md5:06a40fbf30b220468f1e0957c0f558ff', | ||||
|                 'uploader': 'Tested', | ||||
|                 'uploader_id': 'testedcom', | ||||
|             }, | ||||
|             # No need to test YoutubeIE here | ||||
|             'params': { | ||||
|                 'skip_download': True, | ||||
|             }, | ||||
|         }, | ||||
|         # funnyordie embed | ||||
|         { | ||||
|             'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns', | ||||
|             'md5': '7cf780be104d40fea7bae52eed4a470e', | ||||
|             'info_dict': { | ||||
|                 'id': '18e820ec3f', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama', | ||||
|                 'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.', | ||||
|             }, | ||||
|         }, | ||||
|         # RUTV embed | ||||
|         { | ||||
|             'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html', | ||||
|             'info_dict': { | ||||
|                 'id': '776940', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'Охотское море стало целиком российским', | ||||
|                 'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43', | ||||
|             }, | ||||
|             'params': { | ||||
|                 # m3u8 download | ||||
|                 'skip_download': True, | ||||
|             }, | ||||
|         }, | ||||
|         # Embedded TED video | ||||
|         { | ||||
|             'url': 'http://en.support.wordpress.com/videos/ted-talks/', | ||||
|             'md5': 'deeeabcc1085eb2ba205474e7235a3d5', | ||||
|             'info_dict': { | ||||
|                 'id': '981', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'My web playroom', | ||||
|                 'uploader': 'Ze Frank', | ||||
|                 'description': 'md5:ddb2a40ecd6b6a147e400e535874947b', | ||||
|             } | ||||
|         } | ||||
|     ] | ||||
|  | ||||
|     def report_download_webpage(self, video_id): | ||||
|         """Report webpage download.""" | ||||
|         if not self._downloader.params.get('test', False): | ||||
|             self._downloader.report_warning(u'Falling back on generic information extractor.') | ||||
|             self._downloader.report_warning('Falling back on generic information extractor.') | ||||
|         super(GenericIE, self).report_download_webpage(video_id) | ||||
|  | ||||
|     def report_following_redirect(self, new_url): | ||||
|         """Report information extraction.""" | ||||
|         self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url) | ||||
|         self._downloader.to_screen('[redirect] Following redirect to %s' % new_url) | ||||
|  | ||||
|     def _send_head(self, url): | ||||
|         """Check if it is a redirect, like url shorteners, in case return the new url.""" | ||||
| @@ -121,9 +197,14 @@ class GenericIE(InfoExtractor): | ||||
|                     newurl = newurl.replace(' ', '%20') | ||||
|                     newheaders = dict((k,v) for k,v in req.headers.items() | ||||
|                                       if k.lower() not in ("content-length", "content-type")) | ||||
|                     try: | ||||
|                         # This function was deprecated in python 3.3 and removed in 3.4 | ||||
|                         origin_req_host = req.get_origin_req_host() | ||||
|                     except AttributeError: | ||||
|                         origin_req_host = req.origin_req_host | ||||
|                     return HEADRequest(newurl, | ||||
|                                        headers=newheaders, | ||||
|                                        origin_req_host=req.get_origin_req_host(), | ||||
|                                        origin_req_host=origin_req_host, | ||||
|                                        unverifiable=True) | ||||
|                 else: | ||||
|                     raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp) | ||||
| @@ -152,15 +233,47 @@ class GenericIE(InfoExtractor): | ||||
|  | ||||
|         response = opener.open(HEADRequest(url)) | ||||
|         if response is None: | ||||
|             raise ExtractorError(u'Invalid URL protocol') | ||||
|             raise ExtractorError('Invalid URL protocol') | ||||
|         return response | ||||
|  | ||||
|     def _extract_rss(self, url, video_id, doc): | ||||
|         playlist_title = doc.find('./channel/title').text | ||||
|         playlist_desc_el = doc.find('./channel/description') | ||||
|         playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text | ||||
|  | ||||
|         entries = [{ | ||||
|             '_type': 'url', | ||||
|             'url': e.find('link').text, | ||||
|             'title': e.find('title').text, | ||||
|         } for e in doc.findall('./channel/item')] | ||||
|  | ||||
|         return { | ||||
|             '_type': 'playlist', | ||||
|             'id': url, | ||||
|             'title': playlist_title, | ||||
|             'description': playlist_desc, | ||||
|             'entries': entries, | ||||
|         } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         parsed_url = compat_urlparse.urlparse(url) | ||||
|         if not parsed_url.scheme: | ||||
|             self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http') | ||||
|             return self.url_result('http://' + url) | ||||
|         video_id = os.path.splitext(url.split('/')[-1])[0] | ||||
|             default_search = self._downloader.params.get('default_search') | ||||
|             if default_search is None: | ||||
|                 default_search = 'auto' | ||||
|  | ||||
|             if default_search == 'auto': | ||||
|                 if '/' in url: | ||||
|                     self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http') | ||||
|                     return self.url_result('http://' + url) | ||||
|                 else: | ||||
|                     return self.url_result('ytsearch:' + url) | ||||
|             else: | ||||
|                 assert ':' in default_search | ||||
|                 return self.url_result(default_search + url) | ||||
|         video_id = os.path.splitext(url.rstrip('/').split('/')[-1])[0] | ||||
|  | ||||
|         self.to_screen('%s: Requesting header' % video_id) | ||||
|  | ||||
|         try: | ||||
|             response = self._send_head(url) | ||||
| @@ -184,7 +297,7 @@ class GenericIE(InfoExtractor): | ||||
|                     'formats': [{ | ||||
|                         'format_id': m.group('format_id'), | ||||
|                         'url': url, | ||||
|                         'vcodec': u'none' if m.group('type') == 'audio' else None | ||||
|                         'vcodec': 'none' if m.group('type') == 'audio' else None | ||||
|                     }], | ||||
|                     'upload_date': upload_date, | ||||
|                 } | ||||
| @@ -198,10 +311,18 @@ class GenericIE(InfoExtractor): | ||||
|         except ValueError: | ||||
|             # since this is the last-resort InfoExtractor, if | ||||
|             # this error is thrown, it'll be thrown here | ||||
|             raise ExtractorError(u'Failed to download URL: %s' % url) | ||||
|             raise ExtractorError('Failed to download URL: %s' % url) | ||||
|  | ||||
|         self.report_extraction(video_id) | ||||
|  | ||||
|         # Is it an RSS feed? | ||||
|         try: | ||||
|             doc = parse_xml(webpage) | ||||
|             if doc.tag == 'rss': | ||||
|                 return self._extract_rss(url, video_id, doc) | ||||
|         except compat_xml_parse_error: | ||||
|             pass | ||||
|  | ||||
|         # it's tempting to parse this further, but you would | ||||
|         # have to take into account all the variations like | ||||
|         #   Video Title - Site Name | ||||
| @@ -209,30 +330,41 @@ class GenericIE(InfoExtractor): | ||||
|         #   Video Title - Tagline | Site Name | ||||
|         # and so on and so forth; it's just not practical | ||||
|         video_title = self._html_search_regex( | ||||
|             r'(?s)<title>(.*?)</title>', webpage, u'video title', | ||||
|             default=u'video') | ||||
|             r'(?s)<title>(.*?)</title>', webpage, 'video title', | ||||
|             default='video') | ||||
|  | ||||
|         # video uploader is domain name | ||||
|         video_uploader = self._search_regex( | ||||
|             r'^(?:https?://)?([^/]*)/.*', url, u'video uploader') | ||||
|             r'^(?:https?://)?([^/]*)/.*', url, 'video uploader') | ||||
|  | ||||
|         # Look for BrightCove: | ||||
|         bc_url = BrightcoveIE._extract_brightcove_url(webpage) | ||||
|         if bc_url is not None: | ||||
|             self.to_screen(u'Brightcove video detected.') | ||||
|             return self.url_result(bc_url, 'Brightcove') | ||||
|         bc_urls = BrightcoveIE._extract_brightcove_urls(webpage) | ||||
|         if bc_urls: | ||||
|             self.to_screen('Brightcove video detected.') | ||||
|             entries = [{ | ||||
|                 '_type': 'url', | ||||
|                 'url': smuggle_url(bc_url, {'Referer': url}), | ||||
|                 'ie_key': 'Brightcove' | ||||
|             } for bc_url in bc_urls] | ||||
|  | ||||
|             return { | ||||
|                 '_type': 'playlist', | ||||
|                 'title': video_title, | ||||
|                 'id': video_id, | ||||
|                 'entries': entries, | ||||
|             } | ||||
|  | ||||
|         # Look for embedded (iframe) Vimeo player | ||||
|         mobj = re.search( | ||||
|             r'<iframe[^>]+?src="(https?://player.vimeo.com/video/.+?)"', webpage) | ||||
|             r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/.+?)\1', webpage) | ||||
|         if mobj: | ||||
|             player_url = unescapeHTML(mobj.group(1)) | ||||
|             player_url = unescapeHTML(mobj.group('url')) | ||||
|             surl = smuggle_url(player_url, {'Referer': url}) | ||||
|             return self.url_result(surl, 'Vimeo') | ||||
|  | ||||
|         # Look for embedded (swf embed) Vimeo player | ||||
|         mobj = re.search( | ||||
|             r'<embed[^>]+?src="(https?://(?:www\.)?vimeo.com/moogaloop.swf.+?)"', webpage) | ||||
|             r'<embed[^>]+?src="(https?://(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage) | ||||
|         if mobj: | ||||
|             return self.url_result(mobj.group(1), 'Vimeo') | ||||
|  | ||||
| @@ -271,16 +403,12 @@ class GenericIE(InfoExtractor): | ||||
|             } | ||||
|  | ||||
|         # Look for embedded blip.tv player | ||||
|         mobj = re.search(r'<meta\s[^>]*https?://api.blip.tv/\w+/redirect/\w+/(\d+)', webpage) | ||||
|         mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage) | ||||
|         if mobj: | ||||
|             return self.url_result('http://blip.tv/seo/-'+mobj.group(1), 'BlipTV') | ||||
|         mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*https?://(?:\w+\.)?blip.tv/(?:play/|api\.swf#)([a-zA-Z0-9]+)', webpage) | ||||
|             return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV') | ||||
|         mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9]+)', webpage) | ||||
|         if mobj: | ||||
|             player_url = 'http://blip.tv/play/%s.x?p=1' % mobj.group(1) | ||||
|             player_page = self._download_webpage(player_url, mobj.group(1)) | ||||
|             blip_video_id = self._search_regex(r'data-episode-id="(\d+)', player_page, u'blip_video_id', fatal=False) | ||||
|             if blip_video_id: | ||||
|                 return self.url_result('http://blip.tv/seo/-'+blip_video_id, 'BlipTV') | ||||
|             return self.url_result(mobj.group(1), 'BlipTV') | ||||
|  | ||||
|         # Look for Bandcamp pages with custom domain | ||||
|         mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage) | ||||
| @@ -301,18 +429,83 @@ class GenericIE(InfoExtractor): | ||||
|             return OoyalaIE._build_url_result(mobj.group(1)) | ||||
|  | ||||
|         # Look for Aparat videos | ||||
|         mobj = re.search(r'<iframe src="(http://www.aparat.com/video/[^"]+)"', webpage) | ||||
|         mobj = re.search(r'<iframe src="(http://www\.aparat\.com/video/[^"]+)"', webpage) | ||||
|         if mobj is not None: | ||||
|             return self.url_result(mobj.group(1), 'Aparat') | ||||
|  | ||||
|         # Look for MPORA videos | ||||
|         mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage) | ||||
|         if mobj is not None: | ||||
|             return self.url_result(mobj.group(1), 'Mpora') | ||||
|  | ||||
|         # Look for embedded NovaMov player | ||||
|         mobj = re.search( | ||||
|             r'<iframe[^>]+?src=(["\'])(?P<url>http://(?:(?:embed|www)\.)?novamov\.com/embed\.php.+?)\1', webpage) | ||||
|         if mobj is not None: | ||||
|             return self.url_result(mobj.group('url'), 'NovaMov') | ||||
|  | ||||
|         # Look for embedded NowVideo player | ||||
|         mobj = re.search( | ||||
|             r'<iframe[^>]+?src=(["\'])(?P<url>http://(?:(?:embed|www)\.)?nowvideo\.(?:ch|sx|eu)/embed\.php.+?)\1', webpage) | ||||
|         if mobj is not None: | ||||
|             return self.url_result(mobj.group('url'), 'NowVideo') | ||||
|  | ||||
|         # Look for embedded Facebook player | ||||
|         mobj = re.search( | ||||
|             r'<iframe[^>]+?src=(["\'])(?P<url>https://www\.facebook\.com/video/embed.+?)\1', webpage) | ||||
|         if mobj is not None: | ||||
|             return self.url_result(mobj.group('url'), 'Facebook') | ||||
|  | ||||
|         # Look for embedded VK player | ||||
|         mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage) | ||||
|         if mobj is not None: | ||||
|             return self.url_result(mobj.group('url'), 'VK') | ||||
|  | ||||
|         # Look for embedded Huffington Post player | ||||
|         mobj = re.search( | ||||
|             r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage) | ||||
|         if mobj is not None: | ||||
|             return self.url_result(mobj.group('url'), 'HuffPost') | ||||
|  | ||||
|         # Look for embed.ly | ||||
|         mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage) | ||||
|         if mobj is not None: | ||||
|             return self.url_result(mobj.group('url')) | ||||
|         mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage) | ||||
|         if mobj is not None: | ||||
|             return self.url_result(compat_urllib_parse.unquote(mobj.group('url'))) | ||||
|  | ||||
|         # Look for funnyordie embed | ||||
|         matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage) | ||||
|         if matches: | ||||
|             urlrs = [self.url_result(unescapeHTML(eurl), 'FunnyOrDie') | ||||
|                      for eurl in matches] | ||||
|             return self.playlist_result( | ||||
|                 urlrs, playlist_id=video_id, playlist_title=video_title) | ||||
|  | ||||
|         # Look for embedded RUTV player | ||||
|         rutv_url = RUTVIE._extract_url(webpage) | ||||
|         if rutv_url: | ||||
|             return self.url_result(rutv_url, 'RUTV') | ||||
|  | ||||
|         # Start with something easy: JW Player in SWFObject | ||||
|         mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage) | ||||
|         if mobj is None: | ||||
|             # Look for gorilla-vid style embedding | ||||
|             mobj = re.search(r'(?s)(?:jw_plugins|JWPlayerOptions).*?file\s*:\s*["\'](.*?)["\']', webpage) | ||||
|         if mobj is None: | ||||
|             # Broaden the search a little bit | ||||
|             mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage) | ||||
|         if mobj is None: | ||||
|             # Broaden the search a little bit: JWPlayer JS loader | ||||
|             mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http[^\'"]*)', webpage) | ||||
|             mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage) | ||||
|  | ||||
|         # Look for embedded TED player | ||||
|         mobj = re.search( | ||||
|             r'<iframe[^>]+?src=(["\'])(?P<url>http://embed\.ted\.com/.+?)\1', webpage) | ||||
|         if mobj is not None: | ||||
|             return self.url_result(mobj.group('url'), 'TED') | ||||
|  | ||||
|         if mobj is None: | ||||
|             # Try to find twitter cards info | ||||
|             mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage) | ||||
| @@ -327,23 +520,39 @@ class GenericIE(InfoExtractor): | ||||
|             # HTML5 video | ||||
|             mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL) | ||||
|         if mobj is None: | ||||
|             raise ExtractorError(u'Unsupported URL: %s' % url) | ||||
|             mobj = re.search( | ||||
|                 r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")' | ||||
|                 r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'([^\']+)\'"', | ||||
|                 webpage) | ||||
|             if mobj: | ||||
|                 new_url = mobj.group(1) | ||||
|                 self.report_following_redirect(new_url) | ||||
|                 return { | ||||
|                     '_type': 'url', | ||||
|                     'url': new_url, | ||||
|                 } | ||||
|         if mobj is None: | ||||
|             raise ExtractorError('Unsupported URL: %s' % url) | ||||
|  | ||||
|         # It's possible that one of the regexes | ||||
|         # matched, but returned an empty group: | ||||
|         if mobj.group(1) is None: | ||||
|             raise ExtractorError(u'Did not find a valid video URL at %s' % url) | ||||
|             raise ExtractorError('Did not find a valid video URL at %s' % url) | ||||
|  | ||||
|         video_url = mobj.group(1) | ||||
|         video_url = compat_urlparse.urljoin(url, video_url) | ||||
|         video_id = compat_urllib_parse.unquote(os.path.basename(video_url)) | ||||
|  | ||||
|         # Sometimes, jwplayer extraction will result in a YouTube URL | ||||
|         if YoutubeIE.suitable(video_url): | ||||
|             return self.url_result(video_url, 'Youtube') | ||||
|  | ||||
|         # here's a fun little line of code for you: | ||||
|         video_id = os.path.splitext(video_id)[0] | ||||
|  | ||||
|         return { | ||||
|             'id':       video_id, | ||||
|             'url':      video_url, | ||||
|             'id': video_id, | ||||
|             'url': video_url, | ||||
|             'uploader': video_uploader, | ||||
|             'title':    video_title, | ||||
|             'title': video_title, | ||||
|         } | ||||
|   | ||||
| @@ -1,4 +1,5 @@ | ||||
| # coding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import datetime | ||||
| import re | ||||
| @@ -10,32 +11,28 @@ from ..utils import ( | ||||
|  | ||||
|  | ||||
| class GooglePlusIE(InfoExtractor): | ||||
|     IE_DESC = u'Google Plus' | ||||
|     _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)' | ||||
|     IE_NAME = u'plus.google' | ||||
|     IE_DESC = 'Google Plus' | ||||
|     _VALID_URL = r'https://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)' | ||||
|     IE_NAME = 'plus.google' | ||||
|     _TEST = { | ||||
|         u"url": u"https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH", | ||||
|         u"file": u"ZButuJc6CtH.flv", | ||||
|         u"info_dict": { | ||||
|             u"upload_date": u"20120613", | ||||
|             u"uploader": u"井上ヨシマサ", | ||||
|             u"title": u"嘆きの天使 降臨" | ||||
|         'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH', | ||||
|         'info_dict': { | ||||
|             'id': 'ZButuJc6CtH', | ||||
|             'ext': 'flv', | ||||
|             'upload_date': '20120613', | ||||
|             'uploader': '井上ヨシマサ', | ||||
|             'title': '嘆きの天使 降臨', | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         # Extract id from URL | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         if mobj is None: | ||||
|             raise ExtractorError(u'Invalid URL: %s' % url) | ||||
|  | ||||
|         post_url = mobj.group(0) | ||||
|         video_id = mobj.group(1) | ||||
|  | ||||
|         video_extension = 'flv' | ||||
|         video_id = mobj.group('id') | ||||
|  | ||||
|         # Step 1, Retrieve post webpage to extract further information | ||||
|         webpage = self._download_webpage(post_url, video_id, u'Downloading entry webpage') | ||||
|         webpage = self._download_webpage(url, video_id, 'Downloading entry webpage') | ||||
|  | ||||
|         self.report_extraction(video_id) | ||||
|  | ||||
| @@ -43,7 +40,7 @@ class GooglePlusIE(InfoExtractor): | ||||
|         upload_date = self._html_search_regex( | ||||
|             r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*> | ||||
|                     ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''', | ||||
|             webpage, u'upload date', fatal=False, flags=re.VERBOSE) | ||||
|             webpage, 'upload date', fatal=False, flags=re.VERBOSE) | ||||
|         if upload_date: | ||||
|             # Convert timestring to a format suitable for filename | ||||
|             upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d") | ||||
| @@ -51,28 +48,27 @@ class GooglePlusIE(InfoExtractor): | ||||
|  | ||||
|         # Extract uploader | ||||
|         uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>', | ||||
|             webpage, u'uploader', fatal=False) | ||||
|             webpage, 'uploader', fatal=False) | ||||
|  | ||||
|         # Extract title | ||||
|         # Get the first line for title | ||||
|         video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]', | ||||
|             webpage, 'title', default=u'NA') | ||||
|             webpage, 'title', default='NA') | ||||
|  | ||||
|         # Step 2, Simulate clicking the image box to launch video | ||||
|         DOMAIN = 'https://plus.google.com/' | ||||
|         video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN), | ||||
|             webpage, u'video page URL') | ||||
|             webpage, 'video page URL') | ||||
|         if not video_page.startswith(DOMAIN): | ||||
|             video_page = DOMAIN + video_page | ||||
|  | ||||
|         webpage = self._download_webpage(video_page, video_id, u'Downloading video page') | ||||
|         webpage = self._download_webpage(video_page, video_id, 'Downloading video page') | ||||
|  | ||||
|         # Extract video links on video page | ||||
|         """Extract video links of all sizes""" | ||||
|         # Extract video links all sizes | ||||
|         pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"' | ||||
|         mobj = re.findall(pattern, webpage) | ||||
|         if len(mobj) == 0: | ||||
|             raise ExtractorError(u'Unable to extract video links') | ||||
|             raise ExtractorError('Unable to extract video links') | ||||
|  | ||||
|         # Sort in resolution | ||||
|         links = sorted(mobj) | ||||
| @@ -87,12 +83,11 @@ class GooglePlusIE(InfoExtractor): | ||||
|         except AttributeError: # Python 3 | ||||
|             video_url = bytes(video_url, 'ascii').decode('unicode-escape') | ||||
|  | ||||
|  | ||||
|         return [{ | ||||
|             'id':       video_id, | ||||
|             'url':      video_url, | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'url': video_url, | ||||
|             'uploader': uploader, | ||||
|             'upload_date':  upload_date, | ||||
|             'title':    video_title, | ||||
|             'ext':      video_extension, | ||||
|         }] | ||||
|             'upload_date': upload_date, | ||||
|             'title': video_title, | ||||
|             'ext': 'flv', | ||||
|         } | ||||
|   | ||||
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import itertools | ||||
| import re | ||||
|  | ||||
| @@ -8,32 +10,42 @@ from ..utils import ( | ||||
|  | ||||
|  | ||||
| class GoogleSearchIE(SearchInfoExtractor): | ||||
|     IE_DESC = u'Google Video search' | ||||
|     _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"' | ||||
|     IE_DESC = 'Google Video search' | ||||
|     _MAX_RESULTS = 1000 | ||||
|     IE_NAME = u'video.google:search' | ||||
|     IE_NAME = 'video.google:search' | ||||
|     _SEARCH_KEY = 'gvsearch' | ||||
|  | ||||
|     def _get_n_results(self, query, n): | ||||
|         """Get a specified number of results for a query""" | ||||
|  | ||||
|         entries = [] | ||||
|         res = { | ||||
|             '_type': 'playlist', | ||||
|             'id': query, | ||||
|             'entries': [] | ||||
|             'title': query, | ||||
|         } | ||||
|  | ||||
|         for pagenum in itertools.count(1): | ||||
|             result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10) | ||||
|             webpage = self._download_webpage(result_url, u'gvsearch:' + query, | ||||
|                                              note='Downloading result page ' + str(pagenum)) | ||||
|         for pagenum in itertools.count(): | ||||
|             result_url = ( | ||||
|                 'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' | ||||
|                 % (compat_urllib_parse.quote_plus(query), pagenum * 10)) | ||||
|  | ||||
|             for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage): | ||||
|                 e = { | ||||
|             webpage = self._download_webpage( | ||||
|                 result_url, 'gvsearch:' + query, | ||||
|                 note='Downloading result page ' + str(pagenum + 1)) | ||||
|  | ||||
|             for hit_idx, mobj in enumerate(re.finditer( | ||||
|                     r'<h3 class="r"><a href="([^"]+)"', webpage)): | ||||
|  | ||||
|                 # Skip playlists | ||||
|                 if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage): | ||||
|                     continue | ||||
|  | ||||
|                 entries.append({ | ||||
|                     '_type': 'url', | ||||
|                     'url': mobj.group(1) | ||||
|                 } | ||||
|                 res['entries'].append(e) | ||||
|                 }) | ||||
|  | ||||
|             if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage): | ||||
|             if (len(entries) >= n) or not re.search(r'id="pnnext"', webpage): | ||||
|                 res['entries'] = entries[:n] | ||||
|                 return res | ||||
|   | ||||
							
								
								
									
										62
									
								
								youtube_dl/extractor/helsinki.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										62
									
								
								youtube_dl/extractor/helsinki.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,62 @@ | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
| class HelsinkiIE(InfoExtractor): | ||||
|     IE_DESC = 'helsinki.fi' | ||||
|     _VALID_URL = r'https?://video\.helsinki\.fi/Arkisto/flash\.php\?id=(?P<id>\d+)' | ||||
|     _TEST = { | ||||
|         'url': 'http://video.helsinki.fi/Arkisto/flash.php?id=20258', | ||||
|         'info_dict': { | ||||
|             'id': '20258', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Tietotekniikkafoorumi-iltapäivä', | ||||
|             'description': 'md5:f5c904224d43c133225130fe156a5ee0', | ||||
|         }, | ||||
|         'params': { | ||||
|             'skip_download': True,  # RTMP | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         video_id = mobj.group('id') | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|         formats = [] | ||||
|  | ||||
|         mobj = re.search(r'file=((\w+):[^&]+)', webpage) | ||||
|         if mobj: | ||||
|             formats.append({ | ||||
|                 'ext': mobj.group(2), | ||||
|                 'play_path': mobj.group(1), | ||||
|                 'url': 'rtmp://flashvideo.it.helsinki.fi/vod/', | ||||
|                 'player_url': 'http://video.helsinki.fi/player.swf', | ||||
|                 'format_note': 'sd', | ||||
|                 'quality': 0, | ||||
|             }) | ||||
|  | ||||
|         mobj = re.search(r'hd\.file=((\w+):[^&]+)', webpage) | ||||
|         if mobj: | ||||
|             formats.append({ | ||||
|                 'ext': mobj.group(2), | ||||
|                 'play_path': mobj.group(1), | ||||
|                 'url': 'rtmp://flashvideo.it.helsinki.fi/vod/', | ||||
|                 'player_url': 'http://video.helsinki.fi/player.swf', | ||||
|                 'format_note': 'hd', | ||||
|                 'quality': 1, | ||||
|             }) | ||||
|  | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'title': self._og_search_title(webpage).replace('Video: ', ''), | ||||
|             'description': self._og_search_description(webpage), | ||||
|             'thumbnail': self._og_search_thumbnail(webpage), | ||||
|             'formats': formats, | ||||
|         } | ||||
| @@ -1,17 +1,25 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import base64 | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_request, | ||||
|     ExtractorError, | ||||
|     HEADRequest, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class HotNewHipHopIE(InfoExtractor): | ||||
|     _VALID_URL = r'http://www\.hotnewhiphop.com/.*\.(?P<id>.*)\.html' | ||||
|     _VALID_URL = r'http://www\.hotnewhiphop\.com/.*\.(?P<id>.*)\.html' | ||||
|     _TEST = { | ||||
|         u'url': u"http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html", | ||||
|         u'file': u'1435540.mp3', | ||||
|         u'md5': u'2c2cd2f76ef11a9b3b581e8b232f3d96', | ||||
|         u'info_dict': { | ||||
|             u"title": u'Freddie Gibbs "Lay It Down"' | ||||
|         'url': 'http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html', | ||||
|         'file': '1435540.mp3', | ||||
|         'md5': '2c2cd2f76ef11a9b3b581e8b232f3d96', | ||||
|         'info_dict': { | ||||
|             'title': 'Freddie Gibbs - Lay It Down' | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -21,24 +29,41 @@ class HotNewHipHopIE(InfoExtractor): | ||||
|  | ||||
|         webpage_src = self._download_webpage(url, video_id) | ||||
|  | ||||
|         video_url_base64 = self._search_regex(r'data-path="(.*?)"', | ||||
|             webpage_src, u'video URL', fatal=False) | ||||
|         video_url_base64 = self._search_regex( | ||||
|             r'data-path="(.*?)"', webpage_src, u'video URL', fatal=False) | ||||
|  | ||||
|         if video_url_base64 == None: | ||||
|             video_url = self._search_regex(r'"contentUrl" content="(.*?)"', webpage_src, | ||||
|                 u'video URL') | ||||
|         if video_url_base64 is None: | ||||
|             video_url = self._search_regex( | ||||
|                 r'"contentUrl" content="(.*?)"', webpage_src, u'video URL') | ||||
|             return self.url_result(video_url, ie='Youtube') | ||||
|  | ||||
|         video_url = base64.b64decode(video_url_base64).decode('utf-8') | ||||
|         reqdata = compat_urllib_parse.urlencode([ | ||||
|             ('mediaType', 's'), | ||||
|             ('mediaId', video_id), | ||||
|         ]) | ||||
|         r = compat_urllib_request.Request( | ||||
|             'http://www.hotnewhiphop.com/ajax/media/getActions/', data=reqdata) | ||||
|         r.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|         mkd = self._download_json( | ||||
|             r, video_id, note='Requesting media key', | ||||
|             errnote='Could not download media key') | ||||
|         if 'mediaKey' not in mkd: | ||||
|             raise ExtractorError('Did not get a media key') | ||||
|  | ||||
|         video_title = self._html_search_regex(r"<title>(.*)</title>", | ||||
|             webpage_src, u'title') | ||||
|         redirect_url = base64.b64decode(video_url_base64).decode('utf-8') | ||||
|         redirect_req = HEADRequest(redirect_url) | ||||
|         req = self._request_webpage( | ||||
|             redirect_req, video_id, | ||||
|             note='Resolving final URL', errnote='Could not resolve final URL') | ||||
|         video_url = req.geturl() | ||||
|         if video_url.endswith('.html'): | ||||
|             raise ExtractorError('Redirect failed') | ||||
|  | ||||
|         results = [{ | ||||
|                     'id': video_id, | ||||
|                     'url' : video_url, | ||||
|                     'title' : video_title, | ||||
|                     'thumbnail' : self._og_search_thumbnail(webpage_src), | ||||
|                     'ext' : 'mp3', | ||||
|                     }] | ||||
|         return results | ||||
|         video_title = self._og_search_title(webpage_src).strip() | ||||
|  | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'url': video_url, | ||||
|             'title': video_title, | ||||
|             'thumbnail': self._og_search_thumbnail(webpage_src), | ||||
|         } | ||||
|   | ||||
| @@ -1,17 +1,20 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
| class HowcastIE(InfoExtractor): | ||||
|     _VALID_URL = r'(?:https?://)?(?:www\.)?howcast\.com/videos/(?P<id>\d+)' | ||||
|     _VALID_URL = r'https?://(?:www\.)?howcast\.com/videos/(?P<id>\d+)' | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly', | ||||
|         u'file': u'390161.mp4', | ||||
|         u'md5': u'8b743df908c42f60cf6496586c7f12c3', | ||||
|         u'info_dict': { | ||||
|             u"description": u"The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here's the proper way to tie a square knot.",  | ||||
|             u"title": u"How to Tie a Square Knot Properly" | ||||
|         'url': 'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly', | ||||
|         'md5': '8b743df908c42f60cf6496586c7f12c3', | ||||
|         'info_dict': { | ||||
|             'id': '390161', | ||||
|             'ext': 'mp4', | ||||
|             'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.',  | ||||
|             'title': 'How to Tie a Square Knot Properly', | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -24,22 +27,15 @@ class HowcastIE(InfoExtractor): | ||||
|         self.report_extraction(video_id) | ||||
|  | ||||
|         video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)', | ||||
|             webpage, u'video URL') | ||||
|  | ||||
|         video_title = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'', | ||||
|             webpage, u'title') | ||||
|             webpage, 'video URL') | ||||
|  | ||||
|         video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'', | ||||
|             webpage, u'description', fatal=False) | ||||
|             webpage, 'description', fatal=False) | ||||
|  | ||||
|         thumbnail = self._html_search_regex(r'<meta content=\'(.+?)\' property=\'og:image\'', | ||||
|             webpage, u'thumbnail', fatal=False) | ||||
|  | ||||
|         return [{ | ||||
|             'id':       video_id, | ||||
|             'url':      video_url, | ||||
|             'ext':      'mp4', | ||||
|             'title':    video_title, | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'url': video_url, | ||||
|             'title': self._og_search_title(webpage), | ||||
|             'description': video_description, | ||||
|             'thumbnail': thumbnail, | ||||
|         }] | ||||
|             'thumbnail': self._og_search_thumbnail(webpage), | ||||
|         } | ||||
|   | ||||
							
								
								
									
										82
									
								
								youtube_dl/extractor/huffpost.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										82
									
								
								youtube_dl/extractor/huffpost.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,82 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     parse_duration, | ||||
|     unified_strdate, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class HuffPostIE(InfoExtractor): | ||||
|     IE_DESC = 'Huffington Post' | ||||
|     _VALID_URL = r'''(?x) | ||||
|         https?://(embed\.)?live\.huffingtonpost\.com/ | ||||
|         (?: | ||||
|             r/segment/[^/]+/| | ||||
|             HPLEmbedPlayer/\?segmentId= | ||||
|         ) | ||||
|         (?P<id>[0-9a-f]+)''' | ||||
|  | ||||
|     _TEST = { | ||||
|         'url': 'http://live.huffingtonpost.com/r/segment/legalese-it/52dd3e4b02a7602131000677', | ||||
|         'file': '52dd3e4b02a7602131000677.mp4', | ||||
|         'md5': '55f5e8981c1c80a64706a44b74833de8', | ||||
|         'info_dict': { | ||||
|             'title': 'Legalese It! with @MikeSacksHP', | ||||
|             'description': 'This week on Legalese It, Mike talks to David Bosco about his new book on the ICC, "Rough Justice," he also discusses the Virginia AG\'s historic stance on gay marriage, the execution of Edgar Tamayo, the ICC\'s delay of Kenya\'s President and more.  ', | ||||
|             'duration': 1549, | ||||
|             'upload_date': '20140124', | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         video_id = mobj.group('id') | ||||
|  | ||||
|         api_url = 'http://embed.live.huffingtonpost.com/api/segments/%s.json' % video_id | ||||
|         data = self._download_json(api_url, video_id)['data'] | ||||
|  | ||||
|         video_title = data['title'] | ||||
|         duration = parse_duration(data['running_time']) | ||||
|         upload_date = unified_strdate(data['schedule']['starts_at']) | ||||
|         description = data.get('description') | ||||
|  | ||||
|         thumbnails = [] | ||||
|         for url in data['images'].values(): | ||||
|             m = re.match('.*-([0-9]+x[0-9]+)\.', url) | ||||
|             if not m: | ||||
|                 continue | ||||
|             thumbnails.append({ | ||||
|                 'url': url, | ||||
|                 'resolution': m.group(1), | ||||
|             }) | ||||
|  | ||||
|         formats = [{ | ||||
|             'format': key, | ||||
|             'format_id': key.replace('/', '.'), | ||||
|             'ext': 'mp4', | ||||
|             'url': url, | ||||
|             'vcodec': 'none' if key.startswith('audio/') else None, | ||||
|         } for key, url in data['sources']['live'].items()] | ||||
|         if data.get('fivemin_id'): | ||||
|             fid = data['fivemin_id'] | ||||
|             fcat = str(int(fid) // 100 + 1) | ||||
|             furl = 'http://avideos.5min.com/2/' + fcat[-3:] + '/' + fcat + '/' + fid + '.mp4' | ||||
|             formats.append({ | ||||
|                 'format': 'fivemin', | ||||
|                 'url': furl, | ||||
|                 'preference': 1, | ||||
|             }) | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'title': video_title, | ||||
|             'description': description, | ||||
|             'formats': formats, | ||||
|             'duration': duration, | ||||
|             'upload_date': upload_date, | ||||
|             'thumbnails': thumbnails, | ||||
|         } | ||||
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| import json | ||||
|  | ||||
| @@ -9,18 +11,18 @@ from ..utils import ( | ||||
|  | ||||
|  | ||||
| class ImdbIE(InfoExtractor): | ||||
|     IE_NAME = u'imdb' | ||||
|     IE_DESC = u'Internet Movie Database trailers' | ||||
|     IE_NAME = 'imdb' | ||||
|     IE_DESC = 'Internet Movie Database trailers' | ||||
|     _VALID_URL = r'http://(?:www|m)\.imdb\.com/video/imdb/vi(?P<id>\d+)' | ||||
|  | ||||
|     _TEST = { | ||||
|         u'url': u'http://www.imdb.com/video/imdb/vi2524815897', | ||||
|         u'md5': u'9f34fa777ade3a6e57a054fdbcb3a068', | ||||
|         u'info_dict': { | ||||
|             u'id': u'2524815897', | ||||
|             u'ext': u'mp4', | ||||
|             u'title': u'Ice Age: Continental Drift Trailer (No. 2) - IMDb', | ||||
|             u'description': u'md5:9061c2219254e5d14e03c25c98e96a81', | ||||
|         'url': 'http://www.imdb.com/video/imdb/vi2524815897', | ||||
|         'md5': '9f34fa777ade3a6e57a054fdbcb3a068', | ||||
|         'info_dict': { | ||||
|             'id': '2524815897', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Ice Age: Continental Drift Trailer (No. 2) - IMDb', | ||||
|             'description': 'md5:9061c2219254e5d14e03c25c98e96a81', | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -37,10 +39,10 @@ class ImdbIE(InfoExtractor): | ||||
|             f_path = f_path.strip() | ||||
|             format_page = self._download_webpage( | ||||
|                 compat_urlparse.urljoin(url, f_path), | ||||
|                 u'Downloading info for %s format' % f_id) | ||||
|                 'Downloading info for %s format' % f_id) | ||||
|             json_data = self._search_regex( | ||||
|                 r'<script[^>]+class="imdb-player-data"[^>]*?>(.*?)</script>', | ||||
|                 format_page, u'json data', flags=re.DOTALL) | ||||
|                 format_page, 'json data', flags=re.DOTALL) | ||||
|             info = json.loads(json_data) | ||||
|             format_info = info['videoPlayerObject']['video'] | ||||
|             formats.append({ | ||||
| @@ -55,3 +57,23 @@ class ImdbIE(InfoExtractor): | ||||
|             'description': descr, | ||||
|             'thumbnail': format_info['slate'], | ||||
|         } | ||||
|  | ||||
|  | ||||
class ImdbListIE(InfoExtractor):
    """Extract every trailer video referenced from an IMDb list page.

    Matches list URLs of the form http://www.imdb.com/list/<11-char id>
    and yields a playlist whose entries are delegated to the ImdbIE
    extractor.
    """
    IE_NAME = 'imdb:list'
    IE_DESC = 'Internet Movie Database lists'
    _VALID_URL = r'http://www\.imdb\.com/list/(?P<id>[\da-zA-Z_-]{11})'

    def _real_extract(self, url):
        # The list identifier is the 11-character token captured by _VALID_URL.
        list_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(url, list_id)

        # Collect every /video/imdb/vi... link flagged as a playlist item and
        # hand each one off to the single-video IMDb extractor.
        video_paths = re.findall(
            r'href="(/video/imdb/vi[^"]+)"\s+data-type="playlist"', webpage)
        entries = []
        for path in video_paths:
            entries.append(
                self.url_result('http://www.imdb.com' + path, 'Imdb'))

        list_title = self._html_search_regex(
            r'<h1 class="header">(.*?)</h1>', webpage, 'list title')

        return self.playlist_result(entries, list_id, list_title)
|   | ||||
Some files were not shown because too many files have changed in this diff. Show More
		Reference in New Issue
	
	Block a user