# Compare commits: 2011.02.25...2013.04.22
Only the commit SHA1s were captured for the 1017 commits in this range (first listed `8cb94542f4`, last listed `e0edf1e041`); the author, date, and commit-message columns are empty, so the individual rows are not reproduced here.
**.gitignore** (new file, 20 lines; marked vendored)

```
*.pyc
*.pyo
*~
*.DS_Store
wine-py2exe/
py2exe.log
*.kate-swp
build/
dist/
MANIFEST
README.txt
youtube-dl.1
youtube-dl.bash-completion
youtube-dl
youtube-dl.exe
youtube-dl.tar.gz
.coverage
cover/
updates_key.pem
*.egg-info
```
**.travis.yml** (new file, 15 lines)

```yaml
language: python
python:
  - "2.6"
  - "2.7"
  - "3.3"
script: nosetests test --verbose
notifications:
  email:
    - filippo.valsorda@gmail.com
    - phihag@phihag.de
    - jaime.marquinez.ferrandiz+travis@gmail.com
#  irc:
#    channels:
#      - "irc.freenode.org#youtube-dl"
#    skip_join: true
```
**CHANGELOG** (new file, 14 lines)

```
2013.01.02  Codename: GIULIA

    * Add support for ComedyCentral clips <nto>
    * Corrected Vimeo description fetching <Nick Daniels>
    * Added the --no-post-overwrites argument <Barbu Paul - Gheorghe>
    * --verbose offers more environment info
    * New info_dict field: uploader_id
    * New updates system, with signature checking
    * New IEs: NBA, JustinTV, FunnyOrDie, TweetReel, Steam, Ustream
    * Fixed IEs: BlipTv
    * Fixed for Python 3 IEs: Xvideo, Youku, XNXX, Dailymotion, Vimeo, InfoQ
    * Simplified IEs and test code
    * Various (Python 3 and other) fixes
    * Revamped and expanded tests
```
**(file name not captured)** (1 changed line)

```diff
@@ -1 +1 @@
-2011.02.25
+2012.12.99
```
**LICENSE** (new file, 24 lines)

```
This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.

In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org/>
```
**MANIFEST.in** (new file, 5 lines)

```
include README.md
include test/*.py
include test/*.json
include youtube-dl.bash-completion
include youtube-dl.1
```
**Makefile** (new file, 68 lines)

```make
all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion

clean:
	rm -rf youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz

cleanall: clean
	rm -f youtube-dl youtube-dl.exe

PREFIX=/usr/local
BINDIR=$(PREFIX)/bin
MANDIR=$(PREFIX)/man
SYSCONFDIR=/etc
PYTHON=/usr/bin/env python

install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
	install -d $(DESTDIR)$(BINDIR)
	install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
	install -d $(DESTDIR)$(MANDIR)/man1
	install -m 644 youtube-dl.1 $(DESTDIR)$(MANDIR)/man1
	install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
	install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl

test:
	#nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
	nosetests --verbose test

tar: youtube-dl.tar.gz

.PHONY: all clean install test tar bash-completion pypi-files

pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1

youtube-dl: youtube_dl/*.py
	zip --quiet youtube-dl youtube_dl/*.py
	zip --quiet --junk-paths youtube-dl youtube_dl/__main__.py
	echo '#!$(PYTHON)' > youtube-dl
	cat youtube-dl.zip >> youtube-dl
	rm youtube-dl.zip
	chmod a+x youtube-dl

README.md: youtube_dl/*.py
	COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py

README.txt: README.md
	pandoc -f markdown -t plain README.md -o README.txt

youtube-dl.1: README.md
	pandoc -s -f markdown -t man README.md -o youtube-dl.1

youtube-dl.bash-completion: youtube_dl/*.py devscripts/bash-completion.in
	python devscripts/bash-completion.py

bash-completion: youtube-dl.bash-completion

youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
	@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
		--exclude '*.DS_Store' \
		--exclude '*.kate-swp' \
		--exclude '*.pyc' \
		--exclude '*.pyo' \
		--exclude '*~' \
		--exclude '__pycache' \
		--exclude '.git' \
		-- \
		bin devscripts test youtube_dl \
		CHANGELOG LICENSE README.md README.txt \
		Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion setup.py \
		youtube-dl
```
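The `youtube-dl` target above relies on the fact that a zip archive stays readable when extra bytes are prepended to it: the recipe zips the `youtube_dl/` package, prepends a `#!$(PYTHON)` shebang line, and marks the result executable, and Python then runs the archive's `__main__.py`. Below is a minimal Python sketch of the same packing trick; the file names (`hello`, `hello.zip`) and the toy `__main__.py` are invented for illustration, and this is not the project's actual build code.

```python
import os
import stat
import zipfile

# Create a zip archive whose __main__.py is what Python executes when the
# archive itself is run (python archive.zip, or ./archive with a shebang).
with zipfile.ZipFile("hello.zip", "w", zipfile.ZIP_DEFLATED) as zf:
    zf.writestr("__main__.py", 'print("hello from inside the zip")\n')

# Prepend a shebang line; zip readers locate the archive from its end, so the
# leading bytes do not break it. This mirrors `echo '#!$(PYTHON)' > youtube-dl`
# followed by `cat youtube-dl.zip >> youtube-dl` in the Makefile recipe.
with open("hello", "wb") as out:
    out.write(b"#!/usr/bin/env python\n")
    with open("hello.zip", "rb") as src:
        out.write(src.read())
os.remove("hello.zip")

# Equivalent of `chmod a+x youtube-dl`.
mode = os.stat("hello").st_mode
os.chmod("hello", mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
```

Running `./hello` (or `python hello`) should print the message; this is the same mechanism the FAQ entry further down refers to when it calls youtube-dl "an executable zipfile".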
							
								
								
									
**README.md** (new file, 228 lines)

```markdown
% YOUTUBE-DL(1)

# NAME
youtube-dl

# SYNOPSIS
**youtube-dl** [OPTIONS] URL [URL...]

# DESCRIPTION
**youtube-dl** is a small command-line program to download videos from
YouTube.com and a few more sites. It requires the Python interpreter, version
2.6, 2.7, or 3.3+, and it is not platform specific. It should work on
your Unix box, on Windows or on Mac OS X. It is released to the public domain,
which means you can modify it, redistribute it or use it however you like.

# OPTIONS
    -h, --help                 print this help text and exit
    --version                  print program version and exit
    -U, --update               update this program to latest version
    -i, --ignore-errors        continue on download errors
    -r, --rate-limit LIMIT     maximum download rate (e.g. 50k or 44.6m)
    -R, --retries RETRIES      number of retries (default is 10)
    --buffer-size SIZE         size of download buffer (e.g. 1024 or 16k)
                               (default is 1024)
    --no-resize-buffer         do not automatically adjust the buffer size. By
                               default, the buffer size is automatically resized
                               from an initial value of SIZE.
    --dump-user-agent          display the current browser identification
    --user-agent UA            specify a custom user agent
    --list-extractors          List all supported extractors and the URLs they
                               would handle

## Video Selection:
    --playlist-start NUMBER    playlist video to start at (default is 1)
    --playlist-end NUMBER      playlist video to end at (default is last)
    --match-title REGEX        download only matching titles (regex or caseless
                               sub-string)
    --reject-title REGEX       skip download for matching titles (regex or
                               caseless sub-string)
    --max-downloads NUMBER     Abort after downloading NUMBER files
    --min-filesize SIZE        Do not download any videos smaller than SIZE
                               (e.g. 50k or 44.6m)
    --max-filesize SIZE        Do not download any videos larger than SIZE (e.g.
                               50k or 44.6m)

## Filesystem Options:
    -t, --title                use title in file name
    --id                       use video ID in file name
    -l, --literal              [deprecated] alias of --title
    -A, --auto-number          number downloaded files starting from 00000
    -o, --output TEMPLATE      output filename template. Use %(title)s to get
                               the title, %(uploader)s for the uploader name,
                               %(uploader_id)s for the uploader nickname if
                               different, %(autonumber)s to get an automatically
                               incremented number, %(ext)s for the filename
                               extension, %(upload_date)s for the upload date
                               (YYYYMMDD), %(extractor)s for the provider
                               (youtube, metacafe, etc), %(id)s for the video id
                               , %(playlist)s for the playlist the video is in,
                               %(playlist_index)s for the position in the
                               playlist and %% for a literal percent. Use - to
                               output to stdout. Can also be used to download to
                               a different directory, for example with -o '/my/d
                               ownloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
    --autonumber-size NUMBER   Specifies the number of digits in %(autonumber)s
                               when it is present in output filename template or
                               --autonumber option is given
    --restrict-filenames       Restrict filenames to only ASCII characters, and
                               avoid "&" and spaces in filenames
    -a, --batch-file FILE      file containing URLs to download ('-' for stdin)
    -w, --no-overwrites        do not overwrite files
    -c, --continue             resume partially downloaded files
    --no-continue              do not resume partially downloaded files (restart
                               from beginning)
    --cookies FILE             file to read cookies from and dump cookie jar in
    --no-part                  do not use .part files
    --no-mtime                 do not use the Last-modified header to set the
                               file modification time
    --write-description        write video description to a .description file
    --write-info-json          write video metadata to a .info.json file

## Verbosity / Simulation Options:
    -q, --quiet                activates quiet mode
    -s, --simulate             do not download the video and do not write
                               anything to disk
    --skip-download            do not download the video
    -g, --get-url              simulate, quiet but print URL
    -e, --get-title            simulate, quiet but print title
    --get-thumbnail            simulate, quiet but print thumbnail URL
    --get-description          simulate, quiet but print video description
    --get-filename             simulate, quiet but print output filename
    --get-format               simulate, quiet but print output format
    --newline                  output progress bar as new lines
    --no-progress              do not print progress bar
    --console-title            display progress in console titlebar
    -v, --verbose              print various debugging information
    --dump-intermediate-pages  print downloaded pages to debug problems(very
                               verbose)

## Video Format Options:
    -f, --format FORMAT        video format code
    --all-formats              download all available video formats
    --prefer-free-formats      prefer free video formats unless a specific one
                               is requested
    --max-quality FORMAT       highest quality format to download
    -F, --list-formats         list all available formats (currently youtube
                               only)
    --write-sub                write subtitle file (currently youtube only)
    --only-sub                 downloads only the subtitles (no video)
    --all-subs                 downloads all the available subtitles of the
                               video (currently youtube only)
    --list-subs                lists all available subtitles for the video
                               (currently youtube only)
    --sub-format LANG          subtitle format [srt/sbv] (default=srt)
                               (currently youtube only)
    --sub-lang LANG            language of the subtitles to download (optional)
                               use IETF language tags like 'en'

## Authentication Options:
    -u, --username USERNAME    account username
    -p, --password PASSWORD    account password
    -n, --netrc                use .netrc authentication data

## Post-processing Options:
    -x, --extract-audio        convert video files to audio-only files (requires
                               ffmpeg or avconv and ffprobe or avprobe)
    --audio-format FORMAT      "best", "aac", "vorbis", "mp3", "m4a", "opus", or
                               "wav"; best by default
    --audio-quality QUALITY    ffmpeg/avconv audio quality specification, insert
                               a value between 0 (better) and 9 (worse) for VBR
                               or a specific bitrate like 128K (default 5)
    --recode-video FORMAT      Encode the video to another format if necessary
                               (currently supported: mp4|flv|ogg|webm)
    -k, --keep-video           keeps the video file on disk after the post-
                               processing; the video is erased by default
    --no-post-overwrites       do not overwrite post-processed files; the post-
                               processed files are overwritten by default

# CONFIGURATION

You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl.conf`.
```
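As a rough illustration of the configuration mechanism just described, default arguments read from the system and user config files can simply be prepended to the command-line arguments before option parsing, so that explicit command-line flags win. The following is a sketch of that general idea, not youtube-dl's actual code; the helper names are invented.

```python
import os
import shlex
import sys

def read_conf(path):
    """Return the arguments stored in a config file, or [] if it is missing."""
    try:
        with open(path, encoding="utf-8") as fh:
            # shlex handles quoting and '#' comments like a shell would.
            return shlex.split(fh.read(), comments=True)
    except OSError:
        return []

def argv_with_defaults(cli_args):
    """Prepend /etc/youtube-dl.conf and ~/.config/youtube-dl.conf arguments."""
    system_conf = read_conf("/etc/youtube-dl.conf")
    user_conf = read_conf(os.path.expanduser("~/.config/youtube-dl.conf"))
    return system_conf + user_conf + cli_args

if __name__ == "__main__":
    print(argv_with_defaults(sys.argv[1:]))
```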
# OUTPUT TEMPLATE
 | 
			
		||||
 | 
			
		||||
The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parenthesis, followed by a lowercase S. Allowed names are:
 | 
			
		||||
 | 
			
		||||
 - `id`: The sequence will be replaced by the video identifier.
 | 
			
		||||
 - `url`: The sequence will be replaced by the video URL.
 | 
			
		||||
 - `uploader`: The sequence will be replaced by the nickname of the person who uploaded the video.
 | 
			
		||||
 - `upload_date`: The sequence will be replaced by the upload date in YYYYMMDD format.
 | 
			
		||||
 - `title`: The sequence will be replaced by the video title.
 | 
			
		||||
 - `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
 | 
			
		||||
 - `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
 | 
			
		||||
 - `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
 | 
			
		||||
 - `playlist`: The name or the id of the playlist that contains the video.
 | 
			
		||||
 - `playlist_index`: The index of the video in the playlist, a five-digit number.
 | 
			
		||||
 | 
			
		||||
The current default template is `%(id)s.%(ext)s`, but that will be switchted to `%(title)s-%(id)s.%(ext)s` (which can be requested with `-t` at the moment).

In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or passing the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:

    $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
    youtube-dl test video ''_ä↭𝕐.mp4    # All kinds of weird characters
    $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
    youtube-dl_test_video_.mp4          # A simple file name

# FAQ

### Can you please put the -b option back?

Most people asking this question are not aware that youtube-dl now defaults to downloading the highest available quality as reported by YouTube, which will be 1080p or 720p in some cases, so you no longer need the -b option. For some specific videos, maybe YouTube does not report them to be available in a specific high quality format you're interested in. In that case, simply request it with the -f option and youtube-dl will try to download it.
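For example, if a video is available as format 22 (YouTube's 720p MP4) but youtube-dl does not pick it by default, you can ask for it explicitly; the video ID below is just the test video used elsewhere in this README, and `--list-formats` shows which format codes are actually available for a given video:

    youtube-dl -f 22 "http://www.youtube.com/watch?v=BaW_jenozKc"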

### I get HTTP error 402 when trying to download a video. What's this?

Apparently YouTube requires you to pass a CAPTCHA test if you download too much. We're [considering providing a way to let you solve the CAPTCHA](https://github.com/rg3/youtube-dl/issues/154), but at the moment, your best course of action is pointing a web browser to the YouTube URL, solving the CAPTCHA, and restarting youtube-dl.

### I have downloaded a video but how can I play it?

Once the video is fully downloaded, use any video player, such as [vlc](http://www.videolan.org) or [mplayer](http://www.mplayerhq.hu/).

### The links provided by youtube-dl -g are not working anymore

The URLs youtube-dl outputs require the downloader to have the correct cookies. Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file. Some sites also require a common user agent to be used; use `--dump-user-agent` to see the one in use by youtube-dl.
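A sketch of that workflow, assuming you hand the URL over to wget (the cookie file name is arbitrary):

    youtube-dl --cookies cookies.txt -g "http://some/video"
    wget --load-cookies=cookies.txt --user-agent="$(youtube-dl --dump-user-agent)" "<URL printed by the first command>"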

### ERROR: no fmt_url_map or conn information found in video info

YouTube switched to a new video info format in July 2011 which is not supported by old versions of youtube-dl. You can update youtube-dl with `sudo youtube-dl --update`.

### ERROR: unable to download video ###

YouTube has required an additional signature since September 2012, which is not supported by old versions of youtube-dl. You can update youtube-dl with `sudo youtube-dl --update`.

### SyntaxError: Non-ASCII character ###

The error

    File "youtube-dl", line 2
    SyntaxError: Non-ASCII character '\x93' ...

means you're using an outdated version of Python. Please update to Python 2.6 or 2.7.

### What is this binary file? Where has the code gone?

Since June 2012 (#342) youtube-dl is packed as an executable zipfile; simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repository, as laid out above. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make youtube-dl`.
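For example, on systems where unzip insists on a `.zip` extension, something along these lines should unpack the sources (the directory name is arbitrary):

    cp youtube-dl youtube-dl.zip
    unzip youtube-dl.zip -d youtube-dl-src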

### The exe throws a *Runtime error from Visual C++*

To run the exe you first need to install the [Microsoft Visual C++ 2008 Redistributable Package](http://www.microsoft.com/en-us/download/details.aspx?id=29).

# COPYRIGHT

youtube-dl is released into the public domain by the copyright holders.

This README file was originally written by Daniel Bolton (<https://github.com/dbbolton>) and is likewise released into the public domain.

# BUGS

Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>

Please include:

* Your exact command line, like `youtube-dl -t "http://www.youtube.com/watch?v=uHlDtZ6Oc3s&feature=channel_video_title"`. A common mistake is not to escape the `&`. Putting URLs in quotes should solve this problem.
* If possible, re-run the command with `--verbose` and include the full output; it is really helpful to us.
* The output of `youtube-dl --version`
* The output of `python --version`
* The name and version of your Operating System ("Ubuntu 11.04 x64" or "Windows 7 x64" is usually enough).

For discussions, join us in the IRC channel #youtube-dl on Freenode.

6  bin/youtube-dl  Executable file
@@ -0,0 +1,6 @@
#!/usr/bin/env python

import youtube_dl

if __name__ == '__main__':
    youtube_dl.main()

BIN  devscripts/SizeOfImage.patch  Normal file
Binary file not shown.

BIN  devscripts/SizeOfImage_w.patch  Normal file
Binary file not shown.

14  devscripts/bash-completion.in  Normal file
@@ -0,0 +1,14 @@
__youtube-dl()
{
    local cur prev opts
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    opts="{{flags}}"

    if [[ ${cur} == * ]] ; then
        COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
        return 0
    fi
}

complete -F __youtube-dl youtube-dl

26  devscripts/bash-completion.py  Executable file
@@ -0,0 +1,26 @@
#!/usr/bin/env python
import os
from os.path import dirname as dirn
import sys

sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl

BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"

def build_completion(opt_parser):
    opts_flag = []
    for group in opt_parser.option_groups:
        for option in group.option_list:
            #for every long flag
            opts_flag.append(option.get_opt_string())
    with open(BASH_COMPLETION_TEMPLATE) as f:
        template = f.read()
    with open(BASH_COMPLETION_FILE, "w") as f:
        #just using the special char
        filled_template = template.replace("{{flags}}", " ".join(opts_flag))
        f.write(filled_template)

parser = youtube_dl.parseOpts()[0]
build_completion(parser)

33  devscripts/gh-pages/add-version.py  Executable file
@@ -0,0 +1,33 @@
#!/usr/bin/env python3

import json
import sys
import hashlib
import urllib.request

if len(sys.argv) <= 1:
	print('Specify the version number as parameter')
	sys.exit()
version = sys.argv[1]

with open('update/LATEST_VERSION', 'w') as f:
	f.write(version)

versions_info = json.load(open('update/versions.json'))
if 'signature' in versions_info:
	del versions_info['signature']

new_version = {}

filenames = {'bin': 'youtube-dl', 'exe': 'youtube-dl.exe', 'tar': 'youtube-dl-%s.tar.gz' % version}
for key, filename in filenames.items():
	print('Downloading and checksumming %s...' %filename)
	url = 'http://youtube-dl.org/downloads/%s/%s' % (version, filename)
	data = urllib.request.urlopen(url).read()
	sha256sum = hashlib.sha256(data).hexdigest()
	new_version[key] = (url, sha256sum)

versions_info['versions'][version] = new_version
versions_info['latest'] = version

json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)

32  devscripts/gh-pages/generate-download.py  Executable file
@@ -0,0 +1,32 @@
#!/usr/bin/env python3
import hashlib
import shutil
import subprocess
import tempfile
import urllib.request
import json

versions_info = json.load(open('update/versions.json'))
version = versions_info['latest']
URL = versions_info['versions'][version]['bin'][0]

data = urllib.request.urlopen(URL).read()

# Read template page
with open('download.html.in', 'r', encoding='utf-8') as tmplf:
    template = tmplf.read()

md5sum = hashlib.md5(data).hexdigest()
sha1sum = hashlib.sha1(data).hexdigest()
sha256sum = hashlib.sha256(data).hexdigest()
template = template.replace('@PROGRAM_VERSION@', version)
template = template.replace('@PROGRAM_URL@', URL)
template = template.replace('@PROGRAM_MD5SUM@', md5sum)
template = template.replace('@PROGRAM_SHA1SUM@', sha1sum)
template = template.replace('@PROGRAM_SHA256SUM@', sha256sum)
template = template.replace('@EXE_URL@', versions_info['versions'][version]['exe'][0])
template = template.replace('@EXE_SHA256SUM@', versions_info['versions'][version]['exe'][1])
template = template.replace('@TAR_URL@', versions_info['versions'][version]['tar'][0])
template = template.replace('@TAR_SHA256SUM@', versions_info['versions'][version]['tar'][1])
with open('download.html', 'w', encoding='utf-8') as dlf:
    dlf.write(template)

32  devscripts/gh-pages/sign-versions.py  Executable file
@@ -0,0 +1,32 @@
#!/usr/bin/env python3

import rsa
import json
from binascii import hexlify

try:
    input = raw_input
except NameError:
    pass

versions_info = json.load(open('update/versions.json'))
if 'signature' in versions_info:
	del versions_info['signature']

print('Enter the PKCS1 private key, followed by a blank line:')
privkey = b''
while True:
	try:
		line = input()
	except EOFError:
		break
	if line == '':
		break
	privkey += line.encode('ascii') + b'\n'
privkey = rsa.PrivateKey.load_pkcs1(privkey)

signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
print('signature: ' + signature)

versions_info['signature'] = signature
json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)

21  devscripts/gh-pages/update-copyright.py  Executable file
@@ -0,0 +1,21 @@
#!/usr/bin/env python
# coding: utf-8

from __future__ import with_statement

import datetime
import glob
import io # For Python 2 compatibilty
import os
import re

year = str(datetime.datetime.now().year)
for fn in glob.glob('*.html*'):
    with io.open(fn, encoding='utf-8') as f:
        content = f.read()
    newc = re.sub(u'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', u'Copyright © 2006-' + year, content)
    if content != newc:
        tmpFn = fn + '.part'
        with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
            outf.write(newc)
        os.rename(tmpFn, fn)

57  devscripts/gh-pages/update-feed.py  Executable file
@@ -0,0 +1,57 @@
#!/usr/bin/env python3

import datetime

import textwrap

import json

atom_template=textwrap.dedent("""\
								<?xml version='1.0' encoding='utf-8'?>
								<atom:feed xmlns:atom="http://www.w3.org/2005/Atom">
									<atom:title>youtube-dl releases</atom:title>
									<atom:id>youtube-dl-updates-feed</atom:id>
									<atom:updated>@TIMESTAMP@</atom:updated>
									@ENTRIES@
								</atom:feed>""")

entry_template=textwrap.dedent("""
								<atom:entry>
									<atom:id>youtube-dl-@VERSION@</atom:id>
									<atom:title>New version @VERSION@</atom:title>
									<atom:link href="http://rg3.github.io/youtube-dl" />
									<atom:content type="xhtml">
										<div xmlns="http://www.w3.org/1999/xhtml">
											Downloads available at <a href="http://youtube-dl.org/downloads/@VERSION@/">http://youtube-dl.org/downloads/@VERSION@/</a>
										</div>
									</atom:content>
									<atom:author>
										<atom:name>The youtube-dl maintainers</atom:name>
									</atom:author>
									<atom:updated>@TIMESTAMP@</atom:updated>
								</atom:entry>
								""")

now = datetime.datetime.now()
now_iso = now.isoformat()

atom_template = atom_template.replace('@TIMESTAMP@',now_iso)

entries=[]

versions_info = json.load(open('update/versions.json'))
versions = list(versions_info['versions'].keys())
versions.sort()

for v in versions:
	entry = entry_template.replace('@TIMESTAMP@',v.replace('.','-'))
	entry = entry.replace('@VERSION@',v)
	entries.append(entry)

entries_str = textwrap.indent(''.join(entries), '\t')
atom_template = atom_template.replace('@ENTRIES@', entries_str)

with open('update/releases.atom','w',encoding='utf-8') as atom_file:
	atom_file.write(atom_template)

20  devscripts/make_readme.py  Executable file
@@ -0,0 +1,20 @@
import sys
import re

README_FILE = 'README.md'
helptext = sys.stdin.read()

with open(README_FILE) as f:
    oldreadme = f.read()

header = oldreadme[:oldreadme.index('# OPTIONS')]
footer = oldreadme[oldreadme.index('# CONFIGURATION'):]

options = helptext[helptext.index('  General Options:')+19:]
options = re.sub(r'^  (\w.+)$', r'## \1', options, flags=re.M)
options = '# OPTIONS\n' + options + '\n'

with open(README_FILE, 'w') as f:
    f.write(header)
    f.write(options)
    f.write(footer)

6  devscripts/posix-locale.sh  Executable file
@@ -0,0 +1,6 @@

# source this file in your shell to get a POSIX locale (which will break many programs, but that's kind of the point)

export LC_ALL=POSIX
export LANG=POSIX
export LANGUAGE=POSIX

92  devscripts/release.sh  Executable file
@@ -0,0 +1,92 @@
#!/bin/bash

# IMPORTANT: the following assumptions are made
# * the GH repo is on the origin remote
# * the gh-pages branch is named so locally
# * the git config user.signingkey is properly set

# You will need
# pip install coverage nose rsa

# TODO
# release notes
# make hash on local files

set -e

if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi
version="$1"
if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi

/bin/echo -e "\n### First of all, testing..."
make cleanall
nosetests --with-coverage --cover-package=youtube_dl --cover-html test --stop || exit 1

/bin/echo -e "\n### Changing version in version.py..."
sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py

/bin/echo -e "\n### Committing CHANGELOG README.md and youtube_dl/version.py..."
make README.md
git add CHANGELOG README.md youtube_dl/version.py
git commit -m "release $version"

/bin/echo -e "\n### Now tagging, signing and pushing..."
git tag -s -m "Release $version" "$version"
git show "$version"
read -p "Is it good, can I push? (y/n) " -n 1
if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
echo
MASTER=$(git rev-parse --abbrev-ref HEAD)
git push origin $MASTER:master
git push origin "$version"

/bin/echo -e "\n### OK, now it is time to build the binaries..."
REV=$(git rev-parse HEAD)
make youtube-dl youtube-dl.tar.gz
wget "http://jeromelaheurte.net:8142/download/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe || \
	wget "http://jeromelaheurte.net:8142/build/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe
mkdir -p "build/$version"
mv youtube-dl youtube-dl.exe "build/$version"
mv youtube-dl.tar.gz "build/$version/youtube-dl-$version.tar.gz"
RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
(cd build/$version/ && md5sum $RELEASE_FILES > MD5SUMS)
(cd build/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS)
(cd build/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS)
(cd build/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
git checkout HEAD -- youtube-dl youtube-dl.exe

/bin/echo -e "\n### Signing and uploading the new binaries to youtube-dl.org..."
for f in $RELEASE_FILES; do gpg --detach-sig "build/$version/$f"; done
scp -r "build/$version" ytdl@youtube-dl.org:html/downloads/

/bin/echo -e "\n### Now switching to gh-pages..."
git clone --branch gh-pages --single-branch . build/gh-pages
ROOT=$(pwd)
(
    set -e
    ORIGIN_URL=$(git config --get remote.origin.url)
    cd build/gh-pages
    "$ROOT/devscripts/gh-pages/add-version.py" $version
    "$ROOT/devscripts/gh-pages/update-feed.py"
    "$ROOT/devscripts/gh-pages/sign-versions.py" < "$ROOT/updates_key.pem"
    "$ROOT/devscripts/gh-pages/generate-download.py"
    "$ROOT/devscripts/gh-pages/update-copyright.py"
    git add *.html *.html.in update
    git commit -m "release $version"
    git show HEAD
    read -p "Is it good, can I push? (y/n) " -n 1
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
    echo
    git push "$ROOT" gh-pages
    git push "$ORIGIN_URL" gh-pages
)
rm -rf build

make pypi-files
echo "Uploading to PyPi ..."
python setup.py sdist upload
make clean

/bin/echo -e "\n### DONE!"

40  devscripts/transition_helper.py  Normal file
@@ -0,0 +1,40 @@
#!/usr/bin/env python

import sys, os

try:
    import urllib.request as compat_urllib_request
except ImportError: # Python 2
    import urllib2 as compat_urllib_request

sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')

try:
	raw_input()
except NameError: # Python 3
	input()

filename = sys.argv[0]

API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads"
BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl"

if not os.access(filename, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % filename)

try:
    urlh = compat_urllib_request.urlopen(BIN_URL)
    newcontent = urlh.read()
    urlh.close()
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to download latest version')

try:
    with open(filename, 'wb') as outf:
        outf.write(newcontent)
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to overwrite current version')

sys.stderr.write(u'Done! Now you can run youtube-dl.\n')

12  devscripts/transition_helper_exe/setup.py  Normal file
@@ -0,0 +1,12 @@
from distutils.core import setup
import py2exe

py2exe_options = {
    "bundle_files": 1,
    "compressed": 1,
    "optimize": 2,
    "dist_dir": '.',
    "dll_excludes": ['w9xpopen.exe']
}

setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)

102  devscripts/transition_helper_exe/youtube-dl.py  Normal file
@@ -0,0 +1,102 @@
#!/usr/bin/env python

import sys, os
import urllib2
import json, hashlib

def rsa_verify(message, signature, key):
    from struct import pack
    from hashlib import sha256
    from sys import version_info
    def b(x):
        if version_info[0] == 2: return x
        else: return x.encode('latin1')
    assert(type(message) == type(b('')))
    block_size = 0
    n = key[0]
    while n:
        block_size += 1
        n >>= 8
    signature = pow(int(signature, 16), key[1], key[0])
    raw_bytes = []
    while signature:
        raw_bytes.insert(0, pack("B", signature & 0xFF))
        signature >>= 8
    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
    if signature[0:2] != b('\x00\x01'): return False
    signature = signature[2:]
    if not b('\x00') in signature: return False
    signature = signature[signature.index(b('\x00'))+1:]
    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
    signature = signature[19:]
    if signature != sha256(message).digest(): return False
    return True

sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n')

raw_input()

filename = sys.argv[0]

UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)

if not os.access(filename, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % filename)

exe = os.path.abspath(filename)
directory = os.path.dirname(exe)
if not os.access(directory, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % directory)

try:
    versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8')
    versions_info = json.loads(versions_info)
except:
    sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
if not 'signature' in versions_info:
    sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
signature = versions_info['signature']
del versions_info['signature']
if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY):
    sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')

version = versions_info['versions'][versions_info['latest']]

try:
    urlh = urllib2.urlopen(version['exe'][0])
    newcontent = urlh.read()
    urlh.close()
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to download latest version')

newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['exe'][1]:
    sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')

try:
    with open(exe + '.new', 'wb') as outf:
        outf.write(newcontent)
except (IOError, OSError) as err:
    sys.exit(u'ERROR: unable to write the new version')

try:
    bat = os.path.join(directory, 'youtube-dl-updater.bat')
    b = open(bat, 'w')
    b.write("""
echo Updating youtube-dl...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s"
del "%s"
    \n""" %(exe, exe, bat))
    b.close()

    os.startfile(bat)
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to overwrite current version')

sys.stderr.write(u'Done! Now you can run youtube-dl.\n')

56  devscripts/wine-py2exe.sh  Executable file
@@ -0,0 +1,56 @@
#!/bin/bash

# Run with as parameter a setup.py that works in the current directory
# e.g. no os.chdir()
# It will run twice, the first time will crash

set -e

SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )"

if [ ! -d wine-py2exe ]; then

    sudo apt-get install wine1.3 axel bsdiff

    mkdir wine-py2exe
    cd wine-py2exe
    export WINEPREFIX=`pwd`

    axel -a "http://www.python.org/ftp/python/2.7/python-2.7.msi"
    axel -a "http://downloads.sourceforge.net/project/py2exe/py2exe/0.6.9/py2exe-0.6.9.win32-py2.7.exe"
    #axel -a "http://winetricks.org/winetricks"

    # http://appdb.winehq.org/objectManager.php?sClass=version&iId=21957
    echo "Follow python setup on screen"
    wine msiexec /i python-2.7.msi

    echo "Follow py2exe setup on screen"
    wine py2exe-0.6.9.win32-py2.7.exe

    #echo "Follow Microsoft Visual C++ 2008 Redistributable Package setup on screen"
    #bash winetricks vcrun2008

    rm py2exe-0.6.9.win32-py2.7.exe
    rm python-2.7.msi
    #rm winetricks

    # http://bugs.winehq.org/show_bug.cgi?id=3591

    mv drive_c/Python27/Lib/site-packages/py2exe/run.exe drive_c/Python27/Lib/site-packages/py2exe/run.exe.backup
    bspatch drive_c/Python27/Lib/site-packages/py2exe/run.exe.backup drive_c/Python27/Lib/site-packages/py2exe/run.exe "$SCRIPT_DIR/SizeOfImage.patch"
    mv drive_c/Python27/Lib/site-packages/py2exe/run_w.exe drive_c/Python27/Lib/site-packages/py2exe/run_w.exe.backup
    bspatch drive_c/Python27/Lib/site-packages/py2exe/run_w.exe.backup drive_c/Python27/Lib/site-packages/py2exe/run_w.exe "$SCRIPT_DIR/SizeOfImage_w.patch"

    cd -

else

    export WINEPREFIX="$( cd wine-py2exe && pwd )"

fi

wine "C:\\Python27\\python.exe" "$1" py2exe > "py2exe.log" 2>&1 || true
echo '# Copying python27.dll' >> "py2exe.log"
cp "$WINEPREFIX/drive_c/windows/system32/python27.dll" build/bdist.win32/winexe/bundle-2.7/
wine "C:\\Python27\\python.exe" "$1" py2exe >> "py2exe.log" 2>&1

78  setup.py  Normal file
@@ -0,0 +1,78 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function
import pkg_resources
import sys

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

try:
    import py2exe
    """This will create an exe that needs Microsoft Visual C++ 2008 Redistributable Package"""
except ImportError:
    if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
        print("Cannot import py2exe", file=sys.stderr)
        exit(1)

py2exe_options = {
    "bundle_files": 1,
    "compressed": 1,
    "optimize": 2,
    "dist_dir": '.',
    "dll_excludes": ['w9xpopen.exe']
}
py2exe_console = [{
    "script": "./youtube_dl/__main__.py",
    "dest_base": "youtube-dl",
}]
py2exe_params = {
    'console': py2exe_console,
    'options': { "py2exe": py2exe_options },
    'zipfile': None
}

if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
    params = py2exe_params
else:
    params = {
        'scripts': ['bin/youtube-dl'],
        'data_files': [('etc/bash_completion.d', ['youtube-dl.bash-completion']), # Installing system-wide would require sudo...
                       ('share/doc/youtube_dl', ['README.txt']),
                       ('share/man/man1/', ['youtube-dl.1'])]
    }

# Get the version from youtube_dl/version.py without importing the package
exec(compile(open('youtube_dl/version.py').read(), 'youtube_dl/version.py', 'exec'))

setup(
    name = 'youtube_dl',
    version = __version__,
    description = 'YouTube video downloader',
    long_description = 'Small command-line program to download videos from YouTube.com and other video sites.',
    url = 'https://github.com/rg3/youtube-dl',
    author = 'Ricardo Garcia',
    maintainer = 'Philipp Hagemeister',
    maintainer_email = 'phihag@phihag.de',
    packages = ['youtube_dl'],

    # Provokes warning on most systems (why?!)
    #test_suite = 'nose.collector',
    #test_requires = ['nosetest'],

    classifiers = [
        "Topic :: Multimedia :: Video",
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "License :: Public Domain",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3"
    ],

    **params
)

44  test/parameters.json  Normal file
@@ -0,0 +1,44 @@
{
    "consoletitle": false,
    "continuedl": true,
    "forcedescription": false,
    "forcefilename": false,
    "forceformat": false,
    "forcethumbnail": false,
    "forcetitle": false,
    "forceurl": false,
    "format": null,
    "format_limit": null,
    "ignoreerrors": false,
    "listformats": null,
    "logtostderr": false,
    "matchtitle": null,
    "max_downloads": null,
    "nooverwrites": false,
    "nopart": false,
    "noprogress": false,
    "outtmpl": "%(id)s.%(ext)s",
    "password": null,
    "playlistend": -1,
    "playliststart": 1,
    "prefer_free_formats": false,
    "quiet": false,
    "ratelimit": null,
    "rejecttitle": null,
    "retries": 10,
    "simulate": false,
    "skip_download": false,
    "subtitleslang": null,
    "subtitlesformat": "srt",
    "test": true,
    "updatetime": true,
    "usenetrc": false,
    "username": null,
    "verbose": true,
    "writedescription": false,
    "writeinfojson": true,
    "writesubtitles": false,
    "onlysubtitles": false,
    "allsubtitles": false,
    "listssubtitles": false
}

38  test/test_all_urls.py  Normal file
@@ -0,0 +1,38 @@
#!/usr/bin/env python

import sys
import unittest

# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.InfoExtractors import YoutubeIE, YoutubePlaylistIE, YoutubeChannelIE

class TestAllURLsMatching(unittest.TestCase):
    def test_youtube_playlist_matching(self):
        self.assertTrue(YoutubePlaylistIE.suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
        self.assertTrue(YoutubePlaylistIE.suitable(u'UUBABnxM4Ar9ten8Mdjj1j0Q')) #585
        self.assertTrue(YoutubePlaylistIE.suitable(u'PL63F0C78739B09958'))
        self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q'))
        self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
        self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC'))
        self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
        self.assertFalse(YoutubePlaylistIE.suitable(u'PLtS2H6bU1M'))

    def test_youtube_matching(self):
        self.assertTrue(YoutubeIE.suitable(u'PLtS2H6bU1M'))
        self.assertFalse(YoutubeIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668

    def test_youtube_channel_matching(self):
        self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM'))
        self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM?feature=gb_ch_rec'))
        self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM/videos'))

    def test_youtube_extract(self):
        self.assertEqual(YoutubeIE()._extract_id('http://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc')
        self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc')
        self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc'), 'BaW_jenozKc')

if __name__ == '__main__':
    unittest.main()

142  test/test_download.py  Normal file
@@ -0,0 +1,142 @@
#!/usr/bin/env python

import errno
import hashlib
import io
import os
import json
import unittest
import sys
import hashlib
import socket

# Allow direct execution
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import youtube_dl.FileDownloader
import youtube_dl.InfoExtractors
from youtube_dl.utils import *

DEF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests.json')
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")

RETRIES = 3

# General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
socket.setdefaulttimeout(10)

def _try_rm(filename):
    """ Remove a file if it exists """
    try:
        os.remove(filename)
    except OSError as ose:
        if ose.errno != errno.ENOENT:
            raise

class FileDownloader(youtube_dl.FileDownloader):
    def __init__(self, *args, **kwargs):
        self.to_stderr = self.to_screen
        self.processed_info_dicts = []
        return youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
    def process_info(self, info_dict):
        self.processed_info_dicts.append(info_dict)
        return youtube_dl.FileDownloader.process_info(self, info_dict)

def _file_md5(fn):
    with open(fn, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()

with io.open(DEF_FILE, encoding='utf-8') as deff:
    defs = json.load(deff)
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
    parameters = json.load(pf)


class TestDownload(unittest.TestCase):
    maxDiff = None
    def setUp(self):
        self.parameters = parameters
        self.defs = defs

### Dynamically generate tests
def generator(test_case):

    def test_template(self):
        ie = youtube_dl.InfoExtractors.get_info_extractor(test_case['name'])#getattr(youtube_dl.InfoExtractors, test_case['name'] + 'IE')
        if not ie._WORKING:
            print('Skipping: IE marked as not _WORKING')
            return
        if 'playlist' not in test_case and not test_case['file']:
            print('Skipping: No output file specified')
            return
        if 'skip' in test_case:
            print('Skipping: {0}'.format(test_case['skip']))
            return

        params = self.parameters.copy()
        params.update(test_case.get('params', {}))

        fd = FileDownloader(params)
        for ie in youtube_dl.InfoExtractors.gen_extractors():
            fd.add_info_extractor(ie)
        finished_hook_called = set()
        def _hook(status):
            if status['status'] == 'finished':
                finished_hook_called.add(status['filename'])
        fd.add_progress_hook(_hook)

        test_cases = test_case.get('playlist', [test_case])
        for tc in test_cases:
            _try_rm(tc['file'])
            _try_rm(tc['file'] + '.part')
            _try_rm(tc['file'] + '.info.json')
        try:
            for retry in range(1, RETRIES + 1):
                try:
                    fd.download([test_case['url']])
                except (DownloadError, ExtractorError) as err:
                    if retry == RETRIES: raise

                    # Check if the exception is not a network related one
                    if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
                        raise

                    print('Retrying: {0} failed tries\n\n##########\n\n'.format(retry))
                else:
                    break

            for tc in test_cases:
                if not test_case.get('params', {}).get('skip_download', False):
                    self.assertTrue(os.path.exists(tc['file']), msg='Missing file ' + tc['file'])
                    self.assertTrue(tc['file'] in finished_hook_called)
                self.assertTrue(os.path.exists(tc['file'] + '.info.json'))
                if 'md5' in tc:
                    md5_for_file = _file_md5(tc['file'])
                    self.assertEqual(md5_for_file, tc['md5'])
                with io.open(tc['file'] + '.info.json', encoding='utf-8') as infof:
                    info_dict = json.load(infof)
                for (info_field, value) in tc.get('info_dict', {}).items():
                    self.assertEqual(value, info_dict.get(info_field))
        finally:
            for tc in test_cases:
                _try_rm(tc['file'])
                _try_rm(tc['file'] + '.part')
                _try_rm(tc['file'] + '.info.json')

    return test_template

### And add them to TestDownload
for test_case in defs:
    test_method = generator(test_case)
    test_method.__name__ = "test_{0}".format(test_case["name"])
    setattr(TestDownload, test_method.__name__, test_method)
    del test_method


if __name__ == '__main__':
    unittest.main()
										26
									
								
								test/test_execution.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										26
									
								
								test/test_execution.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,26 @@
 | 
			
		||||
import unittest
 | 
			
		||||
 | 
			
		||||
import sys
 | 
			
		||||
import os
 | 
			
		||||
import subprocess
 | 
			
		||||
 | 
			
		||||
rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    _DEV_NULL = subprocess.DEVNULL
 | 
			
		||||
except AttributeError:
 | 
			
		||||
    _DEV_NULL = open(os.devnull, 'wb')
 | 
			
		||||
 | 
			
		||||
class TestExecution(unittest.TestCase):
 | 
			
		||||
    def test_import(self):
 | 
			
		||||
        subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
 | 
			
		||||
 | 
			
		||||
    def test_module_exec(self):
 | 
			
		||||
        if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution
 | 
			
		||||
            subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
 | 
			
		||||
 | 
			
		||||
    def test_main_exec(self):
 | 
			
		||||
        subprocess.check_call([sys.executable, 'youtube_dl/__main__.py', '--version'], cwd=rootDir, stdout=_DEV_NULL)
 | 
			
		||||
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
    unittest.main()
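test/test_execution.py checks that the package can be imported and executed both as a module (`python -m youtube_dl`) and via `youtube_dl/__main__.py`, discarding output through `subprocess.DEVNULL` where available and through an explicitly opened null device otherwise. Here is a small sketch of that fallback wrapped as a helper; the `--version` call at the bottom is illustrative only and not part of the test suite.

```python
# Sketch: the "DEVNULL if available, else os.devnull" fallback as a helper.
import os
import subprocess
import sys

def null_output():
    # subprocess.DEVNULL exists on Python 3.3+; older interpreters need
    # an explicitly opened null device instead.
    try:
        return subprocess.DEVNULL
    except AttributeError:
        return open(os.devnull, 'wb')

if __name__ == '__main__':
    # Illustrative call only: run the interpreter and discard its output.
    subprocess.check_call([sys.executable, '--version'], stdout=null_output())
```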
test/test_utils.py (new file, 100 lines)
@@ -0,0 +1,100 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
 | 
			
		||||
# Various small unit tests
 | 
			
		||||
 | 
			
		||||
import sys
 | 
			
		||||
import unittest
 | 
			
		||||
 | 
			
		||||
# Allow direct execution
 | 
			
		||||
import os
 | 
			
		||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 | 
			
		||||
 | 
			
		||||
#from youtube_dl.utils import htmlentity_transform
 | 
			
		||||
from youtube_dl.utils import timeconvert
 | 
			
		||||
from youtube_dl.utils import sanitize_filename
 | 
			
		||||
from youtube_dl.utils import unescapeHTML
 | 
			
		||||
from youtube_dl.utils import orderedSet
 | 
			
		||||
 | 
			
		||||
if sys.version_info < (3, 0):
 | 
			
		||||
    _compat_str = lambda b: b.decode('unicode-escape')
 | 
			
		||||
else:
 | 
			
		||||
    _compat_str = lambda s: s
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class TestUtil(unittest.TestCase):
 | 
			
		||||
    def test_timeconvert(self):
 | 
			
		||||
        self.assertTrue(timeconvert('') is None)
 | 
			
		||||
        self.assertTrue(timeconvert('bougrg') is None)
 | 
			
		||||
 | 
			
		||||
    def test_sanitize_filename(self):
 | 
			
		||||
        self.assertEqual(sanitize_filename('abc'), 'abc')
 | 
			
		||||
        self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
 | 
			
		||||
 | 
			
		||||
        self.assertEqual(sanitize_filename('123'), '123')
 | 
			
		||||
 | 
			
		||||
        self.assertEqual('abc_de', sanitize_filename('abc/de'))
 | 
			
		||||
        self.assertFalse('/' in sanitize_filename('abc/de///'))
 | 
			
		||||
 | 
			
		||||
        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
 | 
			
		||||
        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
 | 
			
		||||
        self.assertEqual('yes no', sanitize_filename('yes? no'))
 | 
			
		||||
        self.assertEqual('this - that', sanitize_filename('this: that'))
 | 
			
		||||
 | 
			
		||||
        self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
 | 
			
		||||
        aumlaut = _compat_str('\xe4')
 | 
			
		||||
        self.assertEqual(sanitize_filename(aumlaut), aumlaut)
 | 
			
		||||
        tests = _compat_str('\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430')
 | 
			
		||||
        self.assertEqual(sanitize_filename(tests), tests)
 | 
			
		||||
 | 
			
		||||
        forbidden = '"\0\\/'
 | 
			
		||||
        for fc in forbidden:
 | 
			
		||||
            for fbc in forbidden:
 | 
			
		||||
                self.assertTrue(fbc not in sanitize_filename(fc))
 | 
			
		||||
 | 
			
		||||
    def test_sanitize_filename_restricted(self):
 | 
			
		||||
        self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
 | 
			
		||||
        self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
 | 
			
		||||
 | 
			
		||||
        self.assertEqual(sanitize_filename('123', restricted=True), '123')
 | 
			
		||||
 | 
			
		||||
        self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
 | 
			
		||||
        self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
 | 
			
		||||
 | 
			
		||||
        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
 | 
			
		||||
        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
 | 
			
		||||
        self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
 | 
			
		||||
        self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
 | 
			
		||||
 | 
			
		||||
        tests = _compat_str('a\xe4b\u4e2d\u56fd\u7684c')
 | 
			
		||||
        self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
 | 
			
		||||
        self.assertTrue(sanitize_filename(_compat_str('\xf6'), restricted=True) != '')  # No empty filename
 | 
			
		||||
 | 
			
		||||
        forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
 | 
			
		||||
        for fc in forbidden:
 | 
			
		||||
            for fbc in forbidden:
 | 
			
		||||
                self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
 | 
			
		||||
 | 
			
		||||
        # Handle a common case more neatly
 | 
			
		||||
        self.assertEqual(sanitize_filename(_compat_str('\u5927\u58f0\u5e26 - Song'), restricted=True), 'Song')
 | 
			
		||||
        self.assertEqual(sanitize_filename(_compat_str('\u603b\u7edf: Speech'), restricted=True), 'Speech')
 | 
			
		||||
        # .. but make sure the file name is never empty
 | 
			
		||||
        self.assertTrue(sanitize_filename('-', restricted=True) != '')
 | 
			
		||||
        self.assertTrue(sanitize_filename(':', restricted=True) != '')
 | 
			
		||||
 | 
			
		||||
    def test_sanitize_ids(self):
 | 
			
		||||
        self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
 | 
			
		||||
        self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
 | 
			
		||||
        self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
 | 
			
		||||
 | 
			
		||||
    def test_ordered_set(self):
 | 
			
		||||
        self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
 | 
			
		||||
        self.assertEqual(orderedSet([]), [])
 | 
			
		||||
        self.assertEqual(orderedSet([1]), [1])
 | 
			
		||||
        # keep the list ordered
 | 
			
		||||
        self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
 | 
			
		||||
 | 
			
		||||
    def test_unescape_html(self):
 | 
			
		||||
        self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;'))
 | 
			
		||||
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
    unittest.main()
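The `orderedSet` assertions above pin down a simple contract: duplicates are dropped, the first occurrence wins, and the original order is preserved. Below is a minimal sketch of a function meeting that contract, written only to illustrate the behavior the tests check; it is not `youtube_dl.utils.orderedSet` itself.

```python
# A minimal sketch satisfying the orderedSet assertions above
# (first occurrence wins, original order preserved). Illustration only,
# not the library's actual implementation.
def ordered_set_sketch(iterable):
    seen = set()
    result = []
    for item in iterable:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result

assert ordered_set_sketch([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]) == [1, 2, 3, 4, 5, 6, 7]
assert ordered_set_sketch([135, 1, 1, 1]) == [135, 1]
```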
test/test_write_info_json.py (new file, 77 lines)
@@ -0,0 +1,77 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
# coding: utf-8
 | 
			
		||||
 | 
			
		||||
import json
 | 
			
		||||
import os
 | 
			
		||||
import sys
 | 
			
		||||
import unittest
 | 
			
		||||
 | 
			
		||||
# Allow direct execution
 | 
			
		||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 | 
			
		||||
 | 
			
		||||
import youtube_dl.FileDownloader
 | 
			
		||||
import youtube_dl.InfoExtractors
 | 
			
		||||
from youtube_dl.utils import *
 | 
			
		||||
 | 
			
		||||
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
 | 
			
		||||
 | 
			
		||||
# General configuration (from __init__, not very elegant...)
 | 
			
		||||
jar = compat_cookiejar.CookieJar()
 | 
			
		||||
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
 | 
			
		||||
proxy_handler = compat_urllib_request.ProxyHandler()
 | 
			
		||||
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
 | 
			
		||||
compat_urllib_request.install_opener(opener)
 | 
			
		||||
 | 
			
		||||
class FileDownloader(youtube_dl.FileDownloader):
 | 
			
		||||
    def __init__(self, *args, **kwargs):
 | 
			
		||||
        youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
 | 
			
		||||
        self.to_stderr = self.to_screen
 | 
			
		||||
 | 
			
		||||
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
 | 
			
		||||
    params = json.load(pf)
 | 
			
		||||
params['writeinfojson'] = True
 | 
			
		||||
params['skip_download'] = True
 | 
			
		||||
params['writedescription'] = True
 | 
			
		||||
 | 
			
		||||
TEST_ID = 'BaW_jenozKc'
 | 
			
		||||
INFO_JSON_FILE = TEST_ID + '.mp4.info.json'
 | 
			
		||||
DESCRIPTION_FILE = TEST_ID + '.mp4.description'
 | 
			
		||||
EXPECTED_DESCRIPTION = u'''test chars:  "'/\ä↭𝕐
 | 
			
		||||
 | 
			
		||||
This is a test video for youtube-dl.
 | 
			
		||||
 | 
			
		||||
For more information, contact phihag@phihag.de .'''
 | 
			
		||||
 | 
			
		||||
class TestInfoJSON(unittest.TestCase):
 | 
			
		||||
    def setUp(self):
 | 
			
		||||
        # Clear old files
 | 
			
		||||
        self.tearDown()
 | 
			
		||||
 | 
			
		||||
    def test_info_json(self):
 | 
			
		||||
        ie = youtube_dl.InfoExtractors.YoutubeIE()
 | 
			
		||||
        fd = FileDownloader(params)
 | 
			
		||||
        fd.add_info_extractor(ie)
 | 
			
		||||
        fd.download([TEST_ID])
 | 
			
		||||
        self.assertTrue(os.path.exists(INFO_JSON_FILE))
 | 
			
		||||
        with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf:
 | 
			
		||||
            jd = json.load(jsonf)
 | 
			
		||||
        self.assertEqual(jd['upload_date'], u'20121002')
 | 
			
		||||
        self.assertEqual(jd['description'], EXPECTED_DESCRIPTION)
 | 
			
		||||
        self.assertEqual(jd['id'], TEST_ID)
 | 
			
		||||
        self.assertEqual(jd['extractor'], 'youtube')
 | 
			
		||||
        self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''')
 | 
			
		||||
        self.assertEqual(jd['uploader'], 'Philipp Hagemeister')
 | 
			
		||||
 | 
			
		||||
        self.assertTrue(os.path.exists(DESCRIPTION_FILE))
 | 
			
		||||
        with io.open(DESCRIPTION_FILE, 'r', encoding='utf-8') as descf:
 | 
			
		||||
            descr = descf.read()
 | 
			
		||||
        self.assertEqual(descr, EXPECTED_DESCRIPTION)
 | 
			
		||||
 | 
			
		||||
    def tearDown(self):
 | 
			
		||||
        if os.path.exists(INFO_JSON_FILE):
 | 
			
		||||
            os.remove(INFO_JSON_FILE)
 | 
			
		||||
        if os.path.exists(DESCRIPTION_FILE):
 | 
			
		||||
            os.remove(DESCRIPTION_FILE)
 | 
			
		||||
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
    unittest.main()
test/test_youtube_lists.py (new file, 102 lines)
@@ -0,0 +1,102 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
 | 
			
		||||
import sys
 | 
			
		||||
import unittest
 | 
			
		||||
import json
 | 
			
		||||
 | 
			
		||||
# Allow direct execution
 | 
			
		||||
import os
 | 
			
		||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 | 
			
		||||
 | 
			
		||||
from youtube_dl.InfoExtractors import YoutubeUserIE, YoutubePlaylistIE, YoutubeIE, YoutubeChannelIE
 | 
			
		||||
from youtube_dl.utils import *
 | 
			
		||||
from youtube_dl.FileDownloader import FileDownloader
 | 
			
		||||
 | 
			
		||||
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
 | 
			
		||||
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
 | 
			
		||||
    parameters = json.load(pf)
 | 
			
		||||
 | 
			
		||||
# General configuration (from __init__, not very elegant...)
 | 
			
		||||
jar = compat_cookiejar.CookieJar()
 | 
			
		||||
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
 | 
			
		||||
proxy_handler = compat_urllib_request.ProxyHandler()
 | 
			
		||||
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
 | 
			
		||||
compat_urllib_request.install_opener(opener)
 | 
			
		||||
 | 
			
		||||
class FakeDownloader(FileDownloader):
 | 
			
		||||
    def __init__(self):
 | 
			
		||||
        self.result = []
 | 
			
		||||
        self.params = parameters
 | 
			
		||||
    def to_screen(self, s):
 | 
			
		||||
        print(s)
 | 
			
		||||
    def trouble(self, s):
 | 
			
		||||
        raise Exception(s)
 | 
			
		||||
    def extract_info(self, url):
 | 
			
		||||
        self.result.append(url)
 | 
			
		||||
        return url
 | 
			
		||||
 | 
			
		||||
class TestYoutubeLists(unittest.TestCase):
 | 
			
		||||
    def assertIsPlaylist(self, info):
 | 
			
		||||
        """Make sure the info has '_type' set to 'playlist'"""
 | 
			
		||||
        self.assertEqual(info['_type'], 'playlist')
 | 
			
		||||
 | 
			
		||||
    def test_youtube_playlist(self):
 | 
			
		||||
        dl = FakeDownloader()
 | 
			
		||||
        ie = YoutubePlaylistIE(dl)
 | 
			
		||||
        result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')[0]
 | 
			
		||||
        self.assertIsPlaylist(result)
 | 
			
		||||
        self.assertEqual(result['title'], 'ytdl test PL')
 | 
			
		||||
        ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
 | 
			
		||||
        self.assertEqual(ytie_results, [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])
 | 
			
		||||
 | 
			
		||||
    def test_issue_673(self):
 | 
			
		||||
        dl = FakeDownloader()
 | 
			
		||||
        ie = YoutubePlaylistIE(dl)
 | 
			
		||||
        result = ie.extract('PLBB231211A4F62143')[0]
 | 
			
		||||
        self.assertEqual(result['title'], 'Team Fortress 2')
 | 
			
		||||
        self.assertTrue(len(result['entries']) > 40)
 | 
			
		||||
 | 
			
		||||
    def test_youtube_playlist_long(self):
 | 
			
		||||
        dl = FakeDownloader()
 | 
			
		||||
        ie = YoutubePlaylistIE(dl)
 | 
			
		||||
        result = ie.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')[0]
 | 
			
		||||
        self.assertIsPlaylist(result)
 | 
			
		||||
        self.assertTrue(len(result['entries']) >= 799)
 | 
			
		||||
 | 
			
		||||
    def test_youtube_playlist_with_deleted(self):
 | 
			
		||||
        # See issue #651
 | 
			
		||||
        dl = FakeDownloader()
 | 
			
		||||
        ie = YoutubePlaylistIE(dl)
 | 
			
		||||
        result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')[0]
 | 
			
		||||
        ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
 | 
			
		||||
        self.assertFalse('pElCt5oNDuI' in ytie_results)
 | 
			
		||||
        self.assertFalse('KdPEApIVdWM' in ytie_results)
 | 
			
		||||
 | 
			
		||||
    def test_youtube_course(self):
 | 
			
		||||
        dl = FakeDownloader()
 | 
			
		||||
        ie = YoutubePlaylistIE(dl)
 | 
			
		||||
        # TODO: find a course with more than 100 videos, to test pagination
 | 
			
		||||
        result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')[0]
 | 
			
		||||
        entries = result['entries']
 | 
			
		||||
        self.assertEqual(YoutubeIE()._extract_id(entries[0]['url']), 'j9WZyLZCBzs')
 | 
			
		||||
        self.assertEqual(len(entries), 25)
 | 
			
		||||
        self.assertEqual(YoutubeIE()._extract_id(entries[-1]['url']), 'rYefUsYuEp0')
 | 
			
		||||
 | 
			
		||||
    def test_youtube_channel(self):
 | 
			
		||||
        dl = FakeDownloader()
 | 
			
		||||
        ie = YoutubeChannelIE(dl)
 | 
			
		||||
        #test paginated channel
 | 
			
		||||
        result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')[0]
 | 
			
		||||
        self.assertTrue(len(result['entries']) > 90)
 | 
			
		||||
        #test autogenerated channel
 | 
			
		||||
        result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')[0]
 | 
			
		||||
        self.assertTrue(len(result['entries']) > 20)
 | 
			
		||||
 | 
			
		||||
    def test_youtube_user(self):
 | 
			
		||||
        dl = FakeDownloader()
 | 
			
		||||
        ie = YoutubeUserIE(dl)
 | 
			
		||||
        result = ie.extract('https://www.youtube.com/user/TheLinuxFoundation')[0]
 | 
			
		||||
        self.assertTrue(len(result['entries']) >= 320)
 | 
			
		||||
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
    unittest.main()
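`FakeDownloader` above is a recording test double: `extract_info` appends the URL it was given instead of downloading anything, and `trouble` raises so any reported problem fails the test immediately. Here is a compact sketch of that pattern in isolation, with illustrative names only.

```python
# Sketch of the recording test-double pattern used by FakeDownloader above:
# the stub records calls instead of doing real work, and turns error
# reporting into hard failures. Names here are illustrative only.
class RecordingDownloader(object):
    def __init__(self, params=None):
        self.result = []
        self.params = params or {}

    def extract_info(self, url):
        # Record the URL instead of downloading anything
        self.result.append(url)
        return url

    def trouble(self, message):
        # Surface problems immediately instead of logging them
        raise Exception(message)

dl = RecordingDownloader()
dl.extract_info('http://example.com/video')
assert dl.result == ['http://example.com/video']
```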
test/test_youtube_subtitles.py (new file, 100 lines)
@@ -0,0 +1,100 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
 | 
			
		||||
import sys
 | 
			
		||||
import unittest
 | 
			
		||||
import json
 | 
			
		||||
import io
 | 
			
		||||
import hashlib
 | 
			
		||||
 | 
			
		||||
# Allow direct execution
 | 
			
		||||
import os
 | 
			
		||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 | 
			
		||||
 | 
			
		||||
from youtube_dl.InfoExtractors import YoutubeIE
 | 
			
		||||
from youtube_dl.utils import *
 | 
			
		||||
 | 
			
		||||
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
 | 
			
		||||
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
 | 
			
		||||
    parameters = json.load(pf)
 | 
			
		||||
 | 
			
		||||
# General configuration (from __init__, not very elegant...)
 | 
			
		||||
jar = compat_cookiejar.CookieJar()
 | 
			
		||||
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
 | 
			
		||||
proxy_handler = compat_urllib_request.ProxyHandler()
 | 
			
		||||
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
 | 
			
		||||
compat_urllib_request.install_opener(opener)
 | 
			
		||||
 | 
			
		||||
class FakeDownloader(object):
 | 
			
		||||
    def __init__(self):
 | 
			
		||||
        self.result = []
 | 
			
		||||
        self.params = parameters
 | 
			
		||||
    def to_screen(self, s):
 | 
			
		||||
        print(s)
 | 
			
		||||
    def trouble(self, s):
 | 
			
		||||
        raise Exception(s)
 | 
			
		||||
    def download(self, x):
 | 
			
		||||
        self.result.append(x)
 | 
			
		||||
 | 
			
		||||
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
 | 
			
		||||
 | 
			
		||||
class TestYoutubeSubtitles(unittest.TestCase):
 | 
			
		||||
    def setUp(self):
 | 
			
		||||
        DL = FakeDownloader()
 | 
			
		||||
        DL.params['allsubtitles'] = False
 | 
			
		||||
        DL.params['writesubtitles'] = False
 | 
			
		||||
        DL.params['subtitlesformat'] = 'srt'
 | 
			
		||||
        DL.params['listsubtitles'] = False
 | 
			
		||||
    def test_youtube_no_subtitles(self):
 | 
			
		||||
        DL = FakeDownloader()
 | 
			
		||||
        DL.params['writesubtitles'] = False
 | 
			
		||||
        IE = YoutubeIE(DL)
 | 
			
		||||
        info_dict = IE.extract('QRS8MkLhQmM')
 | 
			
		||||
        subtitles = info_dict[0]['subtitles']
 | 
			
		||||
        self.assertEqual(subtitles, None)
 | 
			
		||||
    def test_youtube_subtitles(self):
 | 
			
		||||
        DL = FakeDownloader()
 | 
			
		||||
        DL.params['writesubtitles'] = True
 | 
			
		||||
        IE = YoutubeIE(DL)
 | 
			
		||||
        info_dict = IE.extract('QRS8MkLhQmM')
 | 
			
		||||
        sub = info_dict[0]['subtitles'][0]
 | 
			
		||||
        self.assertEqual(md5(sub[2]), '4cd9278a35ba2305f47354ee13472260')
 | 
			
		||||
    def test_youtube_subtitles_it(self):
 | 
			
		||||
        DL = FakeDownloader()
 | 
			
		||||
        DL.params['writesubtitles'] = True
 | 
			
		||||
        DL.params['subtitleslang'] = 'it'
 | 
			
		||||
        IE = YoutubeIE(DL)
 | 
			
		||||
        info_dict = IE.extract('QRS8MkLhQmM')
 | 
			
		||||
        sub = info_dict[0]['subtitles'][0]
 | 
			
		||||
        self.assertEqual(md5(sub[2]), '164a51f16f260476a05b50fe4c2f161d')
 | 
			
		||||
    def test_youtube_onlysubtitles(self):
 | 
			
		||||
        DL = FakeDownloader()
 | 
			
		||||
        DL.params['writesubtitles'] = True
 | 
			
		||||
        DL.params['onlysubtitles'] = True
 | 
			
		||||
        IE = YoutubeIE(DL)
 | 
			
		||||
        info_dict = IE.extract('QRS8MkLhQmM')
 | 
			
		||||
        sub = info_dict[0]['subtitles'][0]
 | 
			
		||||
        self.assertEqual(md5(sub[2]), '4cd9278a35ba2305f47354ee13472260')
 | 
			
		||||
    def test_youtube_allsubtitles(self):
 | 
			
		||||
        DL = FakeDownloader()
 | 
			
		||||
        DL.params['allsubtitles'] = True
 | 
			
		||||
        IE = YoutubeIE(DL)
 | 
			
		||||
        info_dict = IE.extract('QRS8MkLhQmM')
 | 
			
		||||
        subtitles = info_dict[0]['subtitles']
 | 
			
		||||
        self.assertEqual(len(subtitles), 13)
 | 
			
		||||
    def test_youtube_subtitles_format(self):
 | 
			
		||||
        DL = FakeDownloader()
 | 
			
		||||
        DL.params['writesubtitles'] = True
 | 
			
		||||
        DL.params['subtitlesformat'] = 'sbv'
 | 
			
		||||
        IE = YoutubeIE(DL)
 | 
			
		||||
        info_dict = IE.extract('QRS8MkLhQmM')
 | 
			
		||||
        sub = info_dict[0]['subtitles'][0]
 | 
			
		||||
        self.assertEqual(md5(sub[2]), '13aeaa0c245a8bed9a451cb643e3ad8b')
 | 
			
		||||
    def test_youtube_list_subtitles(self):
 | 
			
		||||
        DL = FakeDownloader()
 | 
			
		||||
        DL.params['listsubtitles'] = True
 | 
			
		||||
        IE = YoutubeIE(DL)
 | 
			
		||||
        info_dict = IE.extract('QRS8MkLhQmM')
 | 
			
		||||
        self.assertEqual(info_dict, None)
 | 
			
		||||
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
    unittest.main()
test/tests.json (new file, 342 lines)
@@ -0,0 +1,342 @@
 | 
			
		||||
[
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Youtube",
 | 
			
		||||
    "url":  "http://www.youtube.com/watch?v=BaW_jenozKc",
 | 
			
		||||
    "file":  "BaW_jenozKc.mp4",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
      "title": "youtube-dl test video \"'/\\ä↭𝕐",
 | 
			
		||||
      "uploader": "Philipp Hagemeister",
 | 
			
		||||
      "uploader_id": "phihag",
 | 
			
		||||
      "upload_date": "20121002",
 | 
			
		||||
      "description": "test chars:  \"'/\\ä↭𝕐\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ."
 | 
			
		||||
    }
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Dailymotion",
 | 
			
		||||
    "md5":  "392c4b85a60a90dc4792da41ce3144eb",
 | 
			
		||||
    "url":  "http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech",
 | 
			
		||||
    "file":  "x33vw9.mp4"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Metacafe",
 | 
			
		||||
    "add_ie": ["Youtube"],
 | 
			
		||||
    "url":  "http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/",
 | 
			
		||||
    "file":  "_aUehQsCQtM.flv"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "BlipTV",
 | 
			
		||||
    "md5":  "b2d849efcf7ee18917e4b4d9ff37cafe",
 | 
			
		||||
    "url":  "http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352",
 | 
			
		||||
    "file":  "5779306.m4v"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "XVideos",
 | 
			
		||||
    "md5":  "1d0c835822f0a71a7bf011855db929d0",
 | 
			
		||||
    "url":  "http://www.xvideos.com/video939581/funny_porns_by_s_-1",
 | 
			
		||||
    "file":  "939581.flv"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "YouPorn",
 | 
			
		||||
    "md5": "c37ddbaaa39058c76a7e86c6813423c1",
 | 
			
		||||
    "url": "http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/",
 | 
			
		||||
    "file": "505835.mp4"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Pornotube",
 | 
			
		||||
    "md5": "374dd6dcedd24234453b295209aa69b6",
 | 
			
		||||
    "url": "http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing",
 | 
			
		||||
    "file": "1689755.flv"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "YouJizz",
 | 
			
		||||
    "md5": "07e15fa469ba384c7693fd246905547c",
 | 
			
		||||
    "url": "http://www.youjizz.com/videos/zeichentrick-1-2189178.html",
 | 
			
		||||
    "file": "2189178.flv"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Vimeo",
 | 
			
		||||
    "md5":  "8879b6cc097e987f02484baf890129e5",
 | 
			
		||||
    "url":  "http://vimeo.com/56015672",
 | 
			
		||||
    "file": "56015672.mp4",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
      "title": "youtube-dl test video - ★ \" ' 幸 / \\ ä ↭ 𝕐",
 | 
			
		||||
      "uploader": "Filippo Valsorda",
 | 
			
		||||
      "uploader_id": "user7108434",
 | 
			
		||||
      "upload_date": "20121220",
 | 
			
		||||
      "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: ★ \" ' 幸 / \\ ä ↭ 𝕐"
 | 
			
		||||
    }
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Soundcloud",
 | 
			
		||||
    "md5":  "ebef0a451b909710ed1d7787dddbf0d7",
 | 
			
		||||
    "url":  "http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy",
 | 
			
		||||
    "file":  "62986583.mp3"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "StanfordOpenClassroom",
 | 
			
		||||
    "md5":  "544a9468546059d4e80d76265b0443b8",
 | 
			
		||||
    "url":  "http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100",
 | 
			
		||||
    "file":  "PracticalUnix_intro-environment.mp4"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "XNXX",
 | 
			
		||||
    "md5":  "0831677e2b4761795f68d417e0b7b445",
 | 
			
		||||
    "url":  "http://video.xnxx.com/video1135332/lida_naked_funny_actress_5_",
 | 
			
		||||
    "file":  "1135332.flv"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Youku",
 | 
			
		||||
    "url": "http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html",
 | 
			
		||||
    "file": "XNDgyMDQ2NTQw_part00.flv",
 | 
			
		||||
    "md5": "ffe3f2e435663dc2d1eea34faeff5b5b",
 | 
			
		||||
    "params": { "test": false }
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "NBA",
 | 
			
		||||
    "url": "http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html",
 | 
			
		||||
    "file": "0021200253-okc-bkn-recap.nba.mp4",
 | 
			
		||||
    "md5": "c0edcfc37607344e2ff8f13c378c88a4"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "JustinTV",
 | 
			
		||||
    "url": "http://www.twitch.tv/thegamedevhub/b/296128360",
 | 
			
		||||
    "file": "296128360.flv",
 | 
			
		||||
    "md5": "ecaa8a790c22a40770901460af191c9a"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "MyVideo",
 | 
			
		||||
    "url": "http://www.myvideo.de/watch/8229274/bowling_fail_or_win",
 | 
			
		||||
    "file": "8229274.flv",
 | 
			
		||||
    "md5": "2d2753e8130479ba2cb7e0a37002053e"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Escapist",
 | 
			
		||||
    "url": "http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate",
 | 
			
		||||
    "file": "6618-Breaking-Down-Baldurs-Gate.flv",
 | 
			
		||||
    "md5": "c6793dbda81388f4264c1ba18684a74d",
 | 
			
		||||
    "skip": "Fails with timeout on Travis"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "GooglePlus",
 | 
			
		||||
    "url": "https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH",
 | 
			
		||||
    "file": "ZButuJc6CtH.flv"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "FunnyOrDie",
 | 
			
		||||
    "url": "http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version",
 | 
			
		||||
    "file": "0732f586d7.mp4",
 | 
			
		||||
    "md5": "f647e9e90064b53b6e046e75d0241fbd"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Steam",
 | 
			
		||||
    "url": "http://store.steampowered.com/video/105600/",
 | 
			
		||||
    "playlist": [
 | 
			
		||||
      {
 | 
			
		||||
        "file": "81300.flv",
 | 
			
		||||
        "md5": "f870007cee7065d7c76b88f0a45ecc07",
 | 
			
		||||
        "info_dict": {
 | 
			
		||||
            "title": "Terraria 1.1 Trailer"
 | 
			
		||||
        }
 | 
			
		||||
      },
 | 
			
		||||
      {
 | 
			
		||||
        "file": "80859.flv",
 | 
			
		||||
        "md5": "61aaf31a5c5c3041afb58fb83cbb5751",
 | 
			
		||||
        "info_dict": {
 | 
			
		||||
          "title": "Terraria Trailer"
 | 
			
		||||
        }
 | 
			
		||||
      }
 | 
			
		||||
    ]
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Ustream",
 | 
			
		||||
    "url": "http://www.ustream.tv/recorded/20274954",
 | 
			
		||||
    "file": "20274954.flv",
 | 
			
		||||
    "md5": "088f151799e8f572f84eb62f17d73e5c",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
        "title": "Young Americans for Liberty February 7, 2012 2:28 AM"
 | 
			
		||||
    }
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "InfoQ",
 | 
			
		||||
    "url": "http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things",
 | 
			
		||||
    "file": "12-jan-pythonthings.mp4",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
      "title": "A Few of My Favorite [Python] Things"
 | 
			
		||||
    },
 | 
			
		||||
    "params": {
 | 
			
		||||
      "skip_download": true
 | 
			
		||||
    }
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "ComedyCentral",
 | 
			
		||||
    "url": "http://www.thedailyshow.com/watch/thu-december-13-2012/kristen-stewart",
 | 
			
		||||
    "file": "422212.mp4",
 | 
			
		||||
    "md5": "4e2f5cb088a83cd8cdb7756132f9739d",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
        "title": "thedailyshow-kristen-stewart part 1"
 | 
			
		||||
    }
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "RBMARadio",
 | 
			
		||||
    "url": "http://www.rbmaradio.com/shows/ford-lopatin-live-at-primavera-sound-2011",
 | 
			
		||||
    "file": "ford-lopatin-live-at-primavera-sound-2011.mp3",
 | 
			
		||||
    "md5": "6bc6f9bcb18994b4c983bc3bf4384d95",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
        "title": "Live at Primavera Sound 2011",
 | 
			
		||||
        "description": "Joel Ford and Daniel \u2019Oneohtrix Point Never\u2019 Lopatin fly their midified pop extravaganza to Spain. Live at Primavera Sound 2011.",
 | 
			
		||||
        "uploader": "Ford & Lopatin",
 | 
			
		||||
        "uploader_id": "ford-lopatin",
 | 
			
		||||
        "location": "Spain"
 | 
			
		||||
    }
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Facebook",
 | 
			
		||||
    "url": "https://www.facebook.com/photo.php?v=120708114770723",
 | 
			
		||||
    "file": "120708114770723.mp4",
 | 
			
		||||
    "md5": "48975a41ccc4b7a581abd68651c1a5a8",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
      "title": "PEOPLE ARE AWESOME 2013",
 | 
			
		||||
      "duration": 279
 | 
			
		||||
    }
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "EightTracks",
 | 
			
		||||
    "url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
 | 
			
		||||
    "playlist": [
 | 
			
		||||
      {
 | 
			
		||||
        "file": "11885610.m4a",
 | 
			
		||||
        "md5": "96ce57f24389fc8734ce47f4c1abcc55",
 | 
			
		||||
        "info_dict": {
 | 
			
		||||
          "title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
 | 
			
		||||
          "uploader_id": "ytdl"
 | 
			
		||||
        }
 | 
			
		||||
      },
 | 
			
		||||
      {
 | 
			
		||||
        "file": "11885608.m4a",
 | 
			
		||||
        "md5": "4ab26f05c1f7291ea460a3920be8021f",
 | 
			
		||||
        "info_dict": {
 | 
			
		||||
          "title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
 | 
			
		||||
          "uploader_id": "ytdl"
 | 
			
		||||
 | 
			
		||||
        }
 | 
			
		||||
      },
 | 
			
		||||
      {
 | 
			
		||||
        "file": "11885679.m4a",
 | 
			
		||||
        "md5": "d30b5b5f74217410f4689605c35d1fd7",
 | 
			
		||||
        "info_dict": {
 | 
			
		||||
          "title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad"
 | 
			
		||||
        }
 | 
			
		||||
      },
 | 
			
		||||
      {
 | 
			
		||||
        "file": "11885680.m4a",
 | 
			
		||||
        "md5": "4eb0a669317cd725f6bbd336a29f923a",
 | 
			
		||||
        "info_dict": {
 | 
			
		||||
          "title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad"
 | 
			
		||||
        }
 | 
			
		||||
      },
 | 
			
		||||
      {
 | 
			
		||||
        "file": "11885682.m4a",
 | 
			
		||||
        "md5": "1893e872e263a2705558d1d319ad19e8",
 | 
			
		||||
        "info_dict": {
 | 
			
		||||
          "title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad"
 | 
			
		||||
        }
 | 
			
		||||
      },
 | 
			
		||||
      {
 | 
			
		||||
        "file": "11885683.m4a",
 | 
			
		||||
        "md5": "b673c46f47a216ab1741ae8836af5899",
 | 
			
		||||
        "info_dict": {
 | 
			
		||||
          "title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad"
 | 
			
		||||
        }
 | 
			
		||||
      },
 | 
			
		||||
      {
 | 
			
		||||
        "file": "11885684.m4a",
 | 
			
		||||
        "md5": "1d74534e95df54986da7f5abf7d842b7",
 | 
			
		||||
        "info_dict": {
 | 
			
		||||
          "title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad"
 | 
			
		||||
        }
 | 
			
		||||
      },
 | 
			
		||||
      {
 | 
			
		||||
        "file": "11885685.m4a",
 | 
			
		||||
        "md5": "f081f47af8f6ae782ed131d38b9cd1c0",
 | 
			
		||||
        "info_dict": {
 | 
			
		||||
          "title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad"
 | 
			
		||||
        }
 | 
			
		||||
      }
 | 
			
		||||
    ]
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Keek",
 | 
			
		||||
    "url": "http://www.keek.com/ytdl/keeks/NODfbab",
 | 
			
		||||
    "file": "NODfbab.mp4",
 | 
			
		||||
    "md5": "9b0636f8c0f7614afa4ea5e4c6e57e83",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
      "title": "test chars: \"'/\\ä<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de ."
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "TED",
 | 
			
		||||
    "url": "http://www.ted.com/talks/dan_dennett_on_our_consciousness.html",
 | 
			
		||||
    "file": "102.mp4",
 | 
			
		||||
    "md5": "7bc087e71d16f18f9b8ab9fa62a8a031",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
        "title": "Dan Dennett: The illusion of consciousness",
 | 
			
		||||
        "thumbnail": "http://images.ted.com/images/ted/488_389x292.jpg"
 | 
			
		||||
    }
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "MySpass",
 | 
			
		||||
    "url": "http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/",
 | 
			
		||||
    "file": "11741.mp4",
 | 
			
		||||
    "md5": "0b49f4844a068f8b33f4b7c88405862b",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
        "title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2"
 | 
			
		||||
    }
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Generic",
 | 
			
		||||
    "url": "http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html",
 | 
			
		||||
    "file": "13601338388002.mp4",
 | 
			
		||||
    "md5": "85b90ccc9d73b4acd9138d3af4c27f89"
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "Spiegel",
 | 
			
		||||
    "url": "http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html",
 | 
			
		||||
    "file": "1259285.mp4",
 | 
			
		||||
    "md5": "2c2754212136f35fb4b19767d242f66e",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
        "title": "Vulkanausbruch in Ecuador: Der \"Feuerschlund\" ist wieder aktiv"
 | 
			
		||||
    }
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "LiveLeak",
 | 
			
		||||
    "md5":  "0813c2430bea7a46bf13acf3406992f4",
 | 
			
		||||
    "url":  "http://www.liveleak.com/view?i=757_1364311680",
 | 
			
		||||
    "file":  "757_1364311680.mp4",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
        "title": "Most unlucky car accident",
 | 
			
		||||
        "description": "extremely bad day for this guy..!",
 | 
			
		||||
        "uploader": "ljfriel2"
 | 
			
		||||
    }
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "WorldStarHipHop",
 | 
			
		||||
    "url": "http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO",
 | 
			
		||||
    "file": "wshh6a7q1ny0G34ZwuIO.mp4",
 | 
			
		||||
    "md5": "9d04de741161603bf7071bbf4e883186",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
        "title": "Video: KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick! "
 | 
			
		||||
    }
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
    "name": "ARD",
 | 
			
		||||
    "url": "http://www.ardmediathek.de/das-erste/tagesschau-in-100-sek?documentId=14077640",
 | 
			
		||||
    "file": "14077640.mp4",
 | 
			
		||||
    "md5": "6ca8824255460c787376353f9e20bbd8",
 | 
			
		||||
    "info_dict": {
 | 
			
		||||
        "title": "11.04.2013 09:23 Uhr - Tagesschau in 100 Sekunden"
 | 
			
		||||
    },
 | 
			
		||||
    "skip": "Requires rtmpdump"
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
]
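Each object in tests.json drives one generated test in test/test_download.py above: `name` and `url` are always present, single-video cases carry `file` (and usually `md5` and `info_dict`), while `playlist` cases list those fields per entry; `params`, `add_ie`, and `skip` tune how a case is run. Below is a short, hypothetical validation sketch that loads the file and checks those minimal keys, assuming it is executed from the repository root; it is not part of the test suite.

```python
# Sketch: load tests.json and check the fields the generated tests rely on.
# Illustrative only; the real harness is test/test_download.py shown earlier.
import io
import json
import os

TESTS_FILE = os.path.join('test', 'tests.json')  # assumes the repo root as cwd

with io.open(TESTS_FILE, encoding='utf-8') as f:
    defs = json.load(f)

for case in defs:
    assert 'name' in case and 'url' in case
    # Single-video cases name their output file; playlist cases list entries
    assert 'file' in case or 'playlist' in case
    for entry in case.get('playlist', []):
        assert 'file' in entry
```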
youtube-dl (3081 lines; file diff suppressed because it is too large)

youtube-dl.exe (binary, new file; binary file not shown)

youtube_dl/FileDownloader.py (new file, 981 lines)
@@ -0,0 +1,981 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
# -*- coding: utf-8 -*-
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import
 | 
			
		||||
 | 
			
		||||
import math
 | 
			
		||||
import io
 | 
			
		||||
import os
 | 
			
		||||
import re
 | 
			
		||||
import socket
 | 
			
		||||
import subprocess
 | 
			
		||||
import sys
 | 
			
		||||
import time
 | 
			
		||||
import traceback
 | 
			
		||||
 | 
			
		||||
if os.name == 'nt':
 | 
			
		||||
    import ctypes
 | 
			
		||||
 | 
			
		||||
from .utils import *
 | 
			
		||||
from .InfoExtractors import get_info_extractor
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class FileDownloader(object):
 | 
			
		||||
    """File Downloader class.
 | 
			
		||||
 | 
			
		||||
    File downloader objects are the ones responsible of downloading the
 | 
			
		||||
    actual video file and writing it to disk if the user has requested
 | 
			
		||||
    it, among some other tasks. In most cases there should be one per
 | 
			
		||||
    program. Given a video URL, the downloader doesn't know how to
    extract all the needed information (a task that InfoExtractors do), so it
    has to pass the URL to one of them.
 | 
			
		||||
 | 
			
		||||
    For this, file downloader objects have a method that allows
 | 
			
		||||
    InfoExtractors to be registered in a given order. When it is passed
 | 
			
		||||
    a URL, the file downloader hands it to the first InfoExtractor it
 | 
			
		||||
    finds that reports being able to handle it. The InfoExtractor extracts
 | 
			
		||||
    all the information about the video or videos the URL refers to, and
 | 
			
		||||
    asks the FileDownloader to process the video information, possibly
 | 
			
		||||
    downloading the video.
 | 
			
		||||
 | 
			
		||||
    File downloaders accept a lot of parameters. In order not to saturate
 | 
			
		||||
    the object constructor with arguments, it receives a dictionary of
 | 
			
		||||
    options instead. These options are available through the params
 | 
			
		||||
    attribute for the InfoExtractors to use. The FileDownloader also
 | 
			
		||||
    registers itself as the downloader in charge for the InfoExtractors
 | 
			
		||||
    that are added to it, so this is a "mutual registration".
 | 
			
		||||
 | 
			
		||||
    Available options:
 | 
			
		||||
 | 
			
		||||
    username:          Username for authentication purposes.
 | 
			
		||||
    password:          Password for authentication purposes.
 | 
			
		||||
    usenetrc:          Use netrc for authentication instead.
 | 
			
		||||
    quiet:             Do not print messages to stdout.
 | 
			
		||||
    forceurl:          Force printing final URL.
 | 
			
		||||
    forcetitle:        Force printing title.
 | 
			
		||||
    forcethumbnail:    Force printing thumbnail URL.
 | 
			
		||||
    forcedescription:  Force printing description.
 | 
			
		||||
    forcefilename:     Force printing final filename.
 | 
			
		||||
    simulate:          Do not download the video files.
 | 
			
		||||
    format:            Video format code.
 | 
			
		||||
    format_limit:      Highest quality format to try.
 | 
			
		||||
    outtmpl:           Template for output names.
 | 
			
		||||
    restrictfilenames: Do not allow "&" and spaces in file names
 | 
			
		||||
    ignoreerrors:      Do not stop on download errors.
 | 
			
		||||
    ratelimit:         Download speed limit, in bytes/sec.
 | 
			
		||||
    nooverwrites:      Prevent overwriting files.
 | 
			
		||||
    retries:           Number of times to retry for HTTP error 5xx
 | 
			
		||||
    buffersize:        Size of download buffer in bytes.
 | 
			
		||||
    noresizebuffer:    Do not automatically resize the download buffer.
 | 
			
		||||
    continuedl:        Try to continue downloads if possible.
 | 
			
		||||
    noprogress:        Do not print the progress bar.
 | 
			
		||||
    playliststart:     Playlist item to start at.
 | 
			
		||||
    playlistend:       Playlist item to end at.
 | 
			
		||||
    matchtitle:        Download only matching titles.
 | 
			
		||||
    rejecttitle:       Reject downloads for matching titles.
 | 
			
		||||
    logtostderr:       Log messages to stderr instead of stdout.
 | 
			
		||||
    consoletitle:      Display progress in console window's titlebar.
 | 
			
		||||
    nopart:            Do not use temporary .part files.
 | 
			
		||||
    updatetime:        Use the Last-modified header to set output file timestamps.
 | 
			
		||||
    writedescription:  Write the video description to a .description file
 | 
			
		||||
    writeinfojson:     Write the video description to a .info.json file
 | 
			
		||||
    writesubtitles:    Write the video subtitles to a file
 | 
			
		||||
    onlysubtitles:     Downloads only the subtitles of the video
 | 
			
		||||
    allsubtitles:      Downloads all the subtitles of the video
 | 
			
		||||
    listsubtitles:     Lists all available subtitles for the video
 | 
			
		||||
    subtitlesformat:   Subtitle format [sbv/srt] (default=srt)
 | 
			
		||||
    subtitleslang:     Language of the subtitles to download
 | 
			
		||||
    test:              Download only first bytes to test the downloader.
 | 
			
		||||
    keepvideo:         Keep the video file after post-processing
 | 
			
		||||
    min_filesize:      Skip files smaller than this size
 | 
			
		||||
    max_filesize:      Skip files larger than this size
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    params = None
 | 
			
		||||
    _ies = []
 | 
			
		||||
    _pps = []
 | 
			
		||||
    _download_retcode = None
 | 
			
		||||
    _num_downloads = None
 | 
			
		||||
    _screen_file = None
 | 
			
		||||
 | 
			
		||||
    def __init__(self, params):
 | 
			
		||||
        """Create a FileDownloader object with the given options."""
 | 
			
		||||
        self._ies = []
 | 
			
		||||
        self._pps = []
 | 
			
		||||
        self._progress_hooks = []
 | 
			
		||||
        self._download_retcode = 0
 | 
			
		||||
        self._num_downloads = 0
 | 
			
		||||
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
 | 
			
		||||
        self.params = params
 | 
			
		||||
 | 
			
		||||
        if '%(stitle)s' in self.params['outtmpl']:
 | 
			
		||||
            self.report_warning(u'%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag (which also secures %(uploader)s et al) instead.')
 | 
			
		||||
 | 
			
		||||
    @staticmethod
 | 
			
		||||
    def format_bytes(bytes):
 | 
			
		||||
        if bytes is None:
 | 
			
		||||
            return 'N/A'
 | 
			
		||||
        if type(bytes) is str:
 | 
			
		||||
            bytes = float(bytes)
 | 
			
		||||
        if bytes == 0.0:
 | 
			
		||||
            exponent = 0
 | 
			
		||||
        else:
 | 
			
		||||
            exponent = int(math.log(bytes, 1024.0))
 | 
			
		||||
        suffix = 'bkMGTPEZY'[exponent]
 | 
			
		||||
        converted = float(bytes) / float(1024 ** exponent)
 | 
			
		||||
        return '%.2f%s' % (converted, suffix)
 | 
			
		||||
 | 
			
		||||
    @staticmethod
 | 
			
		||||
    def calc_percent(byte_counter, data_len):
 | 
			
		||||
        if data_len is None:
 | 
			
		||||
            return '---.-%'
 | 
			
		||||
        return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))
 | 
			
		||||
 | 
			
		||||
    @staticmethod
 | 
			
		||||
    def calc_eta(start, now, total, current):
 | 
			
		||||
        if total is None:
 | 
			
		||||
            return '--:--'
 | 
			
		||||
        dif = now - start
 | 
			
		||||
        if current == 0 or dif < 0.001: # One millisecond
 | 
			
		||||
            return '--:--'
 | 
			
		||||
        rate = float(current) / dif
 | 
			
		||||
        eta = int((float(total) - float(current)) / rate)
 | 
			
		||||
        (eta_mins, eta_secs) = divmod(eta, 60)
 | 
			
		||||
        if eta_mins > 99:
 | 
			
		||||
            return '--:--'
 | 
			
		||||
        return '%02d:%02d' % (eta_mins, eta_secs)
 | 
			
		||||
 | 
			
		||||
    @staticmethod
 | 
			
		||||
    def calc_speed(start, now, bytes):
 | 
			
		||||
        dif = now - start
 | 
			
		||||
        if bytes == 0 or dif < 0.001: # One millisecond
 | 
			
		||||
            return '%10s' % '---b/s'
 | 
			
		||||
        return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))
 | 
			
		||||
 | 
			
		||||
    @staticmethod
 | 
			
		||||
    def best_block_size(elapsed_time, bytes):
 | 
			
		||||
        new_min = max(bytes / 2.0, 1.0)
 | 
			
		||||
        new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
 | 
			
		||||
        if elapsed_time < 0.001:
 | 
			
		||||
            return int(new_max)
 | 
			
		||||
        rate = bytes / elapsed_time
 | 
			
		||||
        if rate > new_max:
 | 
			
		||||
            return int(new_max)
 | 
			
		||||
        if rate < new_min:
 | 
			
		||||
            return int(new_min)
 | 
			
		||||
        return int(rate)
 | 
			
		||||
 | 
			
		||||
    @staticmethod
 | 
			
		||||
    def parse_bytes(bytestr):
 | 
			
		||||
        """Parse a string indicating a byte quantity into an integer."""
 | 
			
		||||
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
 | 
			
		||||
        if matchobj is None:
 | 
			
		||||
            return None
 | 
			
		||||
        number = float(matchobj.group(1))
 | 
			
		||||
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
 | 
			
		||||
        return int(round(number * multiplier))
 | 
			
		||||
 | 
			
		||||
    def add_info_extractor(self, ie):
 | 
			
		||||
        """Add an InfoExtractor object to the end of the list."""
 | 
			
		||||
        self._ies.append(ie)
 | 
			
		||||
        ie.set_downloader(self)
 | 
			
		||||
 | 
			
		||||
    def add_post_processor(self, pp):
 | 
			
		||||
        """Add a PostProcessor object to the end of the chain."""
 | 
			
		||||
        self._pps.append(pp)
 | 
			
		||||
        pp.set_downloader(self)
 | 
			
		||||
 | 
			
		||||
    def to_screen(self, message, skip_eol=False):
 | 
			
		||||
        """Print message to stdout if not in quiet mode."""
 | 
			
		||||
        assert type(message) == type(u'')
 | 
			
		||||
        if not self.params.get('quiet', False):
 | 
			
		||||
            terminator = [u'\n', u''][skip_eol]
 | 
			
		||||
            output = message + terminator
 | 
			
		||||
            if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
 | 
			
		||||
                output = output.encode(preferredencoding(), 'ignore')
 | 
			
		||||
            self._screen_file.write(output)
 | 
			
		||||
            self._screen_file.flush()
 | 
			
		||||
 | 
			
		||||
    def to_stderr(self, message):
 | 
			
		||||
        """Print message to stderr."""
 | 
			
		||||
        assert type(message) == type(u'')
 | 
			
		||||
        output = message + u'\n'
 | 
			
		||||
        if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
 | 
			
		||||
            output = output.encode(preferredencoding())
 | 
			
		||||
        sys.stderr.write(output)
 | 
			
		||||
 | 
			
		||||
    def to_cons_title(self, message):
 | 
			
		||||
        """Set console/terminal window title to message."""
 | 
			
		||||
        if not self.params.get('consoletitle', False):
 | 
			
		||||
            return
 | 
			
		||||
        if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
 | 
			
		||||
            # c_wchar_p() might not be necessary if `message` is
 | 
			
		||||
            # already of type unicode()
 | 
			
		||||
            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
 | 
			
		||||
        elif 'TERM' in os.environ:
 | 
			
		||||
            self.to_screen('\033]0;%s\007' % message, skip_eol=True)
 | 
			
		||||
 | 
			
		||||
    def fixed_template(self):
 | 
			
		||||
        """Checks if the output template is fixed."""
 | 
			
		||||
        return (re.search(u'(?u)%\\(.+?\\)s', self.params['outtmpl']) is None)
 | 
			
		||||
 | 
			
		||||
    def trouble(self, message=None, tb=None):
 | 
			
		||||
        """Determine action to take when a download problem appears.
 | 
			
		||||
 | 
			
		||||
        Depending on if the downloader has been configured to ignore
 | 
			
		||||
        download errors or not, this method may throw an exception or
 | 
			
		||||
        not when errors are found, after printing the message.
 | 
			
		||||
 | 
			
		||||
        tb, if given, is additional traceback information.
 | 
			
		||||
        """
 | 
			
		||||
        if message is not None:
 | 
			
		||||
            self.to_stderr(message)
 | 
			
		||||
        if self.params.get('verbose'):
 | 
			
		||||
            if tb is None:
 | 
			
		||||
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
 | 
			
		||||
                    tb = u''
 | 
			
		||||
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
 | 
			
		||||
                        tb += u''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
 | 
			
		||||
                    tb += compat_str(traceback.format_exc())
 | 
			
		||||
                else:
 | 
			
		||||
                    tb_data = traceback.format_list(traceback.extract_stack())
 | 
			
		||||
                    tb = u''.join(tb_data)
 | 
			
		||||
            self.to_stderr(tb)
 | 
			
		||||
        if not self.params.get('ignoreerrors', False):
 | 
			
		||||
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
 | 
			
		||||
                exc_info = sys.exc_info()[1].exc_info
 | 
			
		||||
            else:
 | 
			
		||||
                exc_info = sys.exc_info()
 | 
			
		||||
            raise DownloadError(message, exc_info)
 | 
			
		||||
        self._download_retcode = 1
 | 
			
		||||
 | 
			
		||||
    def report_warning(self, message):
 | 
			
		||||
        '''
 | 
			
		||||
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty, the 'WARNING:' prefix will be colored.
 | 
			
		||||
        '''
 | 
			
		||||
        if sys.stderr.isatty():
 | 
			
		||||
            _msg_header = u'\033[0;33mWARNING:\033[0m'
        else:
            _msg_header = u'WARNING:'
        warning_message = u'%s %s' % (_msg_header, message)
 | 
			
		||||
        self.to_stderr(warning_message)
 | 
			
		||||
 | 
			
		||||
    def report_error(self, message, tb=None):
 | 
			
		||||
        '''
 | 
			
		||||
        Does the same as trouble, but prefixes the message with 'ERROR:', colored
 | 
			
		||||
        in red if stderr is a tty file.
 | 
			
		||||
        '''
 | 
			
		||||
        if sys.stderr.isatty():
 | 
			
		||||
            _msg_header = u'\033[0;31mERROR:\033[0m'
 | 
			
		||||
        else:
 | 
			
		||||
            _msg_header = u'ERROR:'
 | 
			
		||||
        error_message = u'%s %s' % (_msg_header, message)
 | 
			
		||||
        self.trouble(error_message, tb)
 | 
			
		||||
 | 
			
		||||
    def slow_down(self, start_time, byte_counter):
 | 
			
		||||
        """Sleep if the download speed is over the rate limit."""
 | 
			
		||||
        rate_limit = self.params.get('ratelimit', None)
 | 
			
		||||
        if rate_limit is None or byte_counter == 0:
 | 
			
		||||
            return
 | 
			
		||||
        now = time.time()
 | 
			
		||||
        elapsed = now - start_time
 | 
			
		||||
        if elapsed <= 0.0:
 | 
			
		||||
            return
 | 
			
		||||
        speed = float(byte_counter) / elapsed
 | 
			
		||||
        if speed > rate_limit:
 | 
			
		||||
            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
 | 
			
		||||
 | 
			
		||||
    def temp_name(self, filename):
 | 
			
		||||
        """Returns a temporary filename for the given filename."""
 | 
			
		||||
        if self.params.get('nopart', False) or filename == u'-' or \
 | 
			
		||||
                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
 | 
			
		||||
            return filename
 | 
			
		||||
        return filename + u'.part'
 | 
			
		||||
 | 
			
		||||
    def undo_temp_name(self, filename):
 | 
			
		||||
        if filename.endswith(u'.part'):
 | 
			
		||||
            return filename[:-len(u'.part')]
 | 
			
		||||
        return filename
 | 
			
		||||
 | 
			
		||||
    def try_rename(self, old_filename, new_filename):
 | 
			
		||||
        try:
 | 
			
		||||
            if old_filename == new_filename:
 | 
			
		||||
                return
 | 
			
		||||
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
 | 
			
		||||
        except (IOError, OSError) as err:
 | 
			
		||||
            self.report_error(u'unable to rename file')
 | 
			
		||||
 | 
			
		||||
    def try_utime(self, filename, last_modified_hdr):
 | 
			
		||||
        """Try to set the last-modified time of the given file."""
 | 
			
		||||
        if last_modified_hdr is None:
 | 
			
		||||
            return
 | 
			
		||||
        if not os.path.isfile(encodeFilename(filename)):
 | 
			
		||||
            return
 | 
			
		||||
        timestr = last_modified_hdr
 | 
			
		||||
        if timestr is None:
 | 
			
		||||
            return
 | 
			
		||||
        filetime = timeconvert(timestr)
 | 
			
		||||
        if filetime is None:
 | 
			
		||||
            return filetime
 | 
			
		||||
        try:
 | 
			
		||||
            os.utime(filename, (time.time(), filetime))
 | 
			
		||||
        except:
 | 
			
		||||
            pass
 | 
			
		||||
        return filetime
 | 
			
		||||
 | 
			
		||||
    def report_writedescription(self, descfn):
 | 
			
		||||
        """ Report that the description file is being written """
 | 
			
		||||
        self.to_screen(u'[info] Writing video description to: ' + descfn)
 | 
			
		||||
 | 
			
		||||
    def report_writesubtitles(self, sub_filename):
 | 
			
		||||
        """ Report that the subtitles file is being written """
 | 
			
		||||
        self.to_screen(u'[info] Writing video subtitles to: ' + sub_filename)
 | 
			
		||||
 | 
			
		||||
    def report_writeinfojson(self, infofn):
 | 
			
		||||
        """ Report that the metadata file has been written """
 | 
			
		||||
        self.to_screen(u'[info] Writing video description metadata as JSON to: ' + infofn)
 | 
			
		||||
 | 
			
		||||
    def report_destination(self, filename):
 | 
			
		||||
        """Report destination filename."""
 | 
			
		||||
        self.to_screen(u'[download] Destination: ' + filename)
 | 
			
		||||
 | 
			
		||||
    def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
 | 
			
		||||
        """Report download progress."""
 | 
			
		||||
        if self.params.get('noprogress', False):
 | 
			
		||||
            return
 | 
			
		||||
        if self.params.get('progress_with_newline', False):
 | 
			
		||||
            self.to_screen(u'[download] %s of %s at %s ETA %s' %
 | 
			
		||||
                (percent_str, data_len_str, speed_str, eta_str))
 | 
			
		||||
        else:
 | 
			
		||||
            self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
 | 
			
		||||
                (percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
 | 
			
		||||
        self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
 | 
			
		||||
                (percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))
 | 
			
		||||
 | 
			
		||||
    def report_resuming_byte(self, resume_len):
 | 
			
		||||
        """Report attempt to resume at given byte."""
 | 
			
		||||
        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
 | 
			
		||||
 | 
			
		||||
    def report_retry(self, count, retries):
 | 
			
		||||
        """Report retry in case of HTTP error 5xx"""
 | 
			
		||||
        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
 | 
			
		||||
 | 
			
		||||
    def report_file_already_downloaded(self, file_name):
 | 
			
		||||
        """Report file has already been fully downloaded."""
 | 
			
		||||
        try:
 | 
			
		||||
            self.to_screen(u'[download] %s has already been downloaded' % file_name)
 | 
			
		||||
        except (UnicodeEncodeError) as err:
 | 
			
		||||
            self.to_screen(u'[download] The file has already been downloaded')
 | 
			
		||||
 | 
			
		||||
    def report_unable_to_resume(self):
 | 
			
		||||
        """Report it was impossible to resume download."""
 | 
			
		||||
        self.to_screen(u'[download] Unable to resume')
 | 
			
		||||
 | 
			
		||||
    def report_finish(self):
 | 
			
		||||
        """Report download finished."""
 | 
			
		||||
        if self.params.get('noprogress', False):
 | 
			
		||||
            self.to_screen(u'[download] Download completed')
 | 
			
		||||
        else:
 | 
			
		||||
            self.to_screen(u'')
 | 
			
		||||
 | 
			
		||||
    def increment_downloads(self):
 | 
			
		||||
        """Increment the ordinal that assigns a number to each file."""
 | 
			
		||||
        self._num_downloads += 1
 | 
			
		||||
 | 
			
		||||
    def prepare_filename(self, info_dict):
 | 
			
		||||
        """Generate the output filename."""
 | 
			
		||||
        try:
 | 
			
		||||
            template_dict = dict(info_dict)
 | 
			
		||||
 | 
			
		||||
            template_dict['epoch'] = int(time.time())
 | 
			
		||||
            autonumber_size = self.params.get('autonumber_size')
 | 
			
		||||
            if autonumber_size is None:
 | 
			
		||||
                autonumber_size = 5
 | 
			
		||||
            autonumber_templ = u'%0' + str(autonumber_size) + u'd'
 | 
			
		||||
            template_dict['autonumber'] = autonumber_templ % self._num_downloads
 | 
			
		||||
            if template_dict['playlist_index'] is not None:
 | 
			
		||||
                template_dict['playlist_index'] = u'%05d' % template_dict['playlist_index']
 | 
			
		||||
 | 
			
		||||
            sanitize = lambda k,v: sanitize_filename(
 | 
			
		||||
                u'NA' if v is None else compat_str(v),
 | 
			
		||||
                restricted=self.params.get('restrictfilenames'),
 | 
			
		||||
                is_id=(k==u'id'))
 | 
			
		||||
            template_dict = dict((k, sanitize(k, v)) for k,v in template_dict.items())
 | 
			
		||||
 | 
			
		||||
            filename = self.params['outtmpl'] % template_dict
 | 
			
		||||
            return filename
 | 
			
		||||
        except KeyError as err:
 | 
			
		||||
            self.trouble(u'ERROR: Erroneous output template')
 | 
			
		||||
            return None
 | 
			
		||||
        except ValueError as err:
 | 
			
		||||
            self.trouble(u'ERROR: Insufficient system charset ' + repr(preferredencoding()))
 | 
			
		||||
            return None
 | 
			
		||||
 | 
			
		||||
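
For reference, the output template is expanded with ordinary %-formatting against the sanitized info dictionary, so a typical expansion looks like this (illustrative values; the field names are the ones set above):

    outtmpl = u'%(autonumber)s-%(title)s.%(ext)s'
    template_dict = {u'autonumber': u'00001', u'title': u'Some video', u'ext': u'mp4'}
    print(outtmpl % template_dict)   # 00001-Some video.mp4
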
    def _match_entry(self, info_dict):
 | 
			
		||||
        """ Returns None iff the file should be downloaded """
 | 
			
		||||
 | 
			
		||||
        title = info_dict['title']
 | 
			
		||||
        matchtitle = self.params.get('matchtitle', False)
 | 
			
		||||
        if matchtitle:
 | 
			
		||||
            if not re.search(matchtitle, title, re.IGNORECASE):
 | 
			
		||||
                return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"'
 | 
			
		||||
        rejecttitle = self.params.get('rejecttitle', False)
 | 
			
		||||
        if rejecttitle:
 | 
			
		||||
            if re.search(rejecttitle, title, re.IGNORECASE):
 | 
			
		||||
                return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
 | 
			
		||||
        return None
 | 
			
		||||
        
 | 
			
		||||
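
Both patterns are ordinary re.search regular expressions, matched case-insensitively; they normally come from the --match-title / --reject-title options. A sketch, assuming `fd` is a FileDownloader whose params can be mutated directly:

    fd.params['matchtitle'] = u'python'
    fd._match_entry({u'title': u'Cooking show'})  # -> u'"Cooking show" title did not match pattern "python"'
    fd._match_entry({u'title': u'Python talk'})   # -> None, i.e. the video is downloaded
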
    def extract_info(self, url, download = True, ie_name = None):
 | 
			
		||||
        '''
 | 
			
		||||
        Returns a list with a dictionary for each video we find.
 | 
			
		||||
        If 'download', also downloads the videos.
 | 
			
		||||
        '''
 | 
			
		||||
        suitable_found = False
 | 
			
		||||
        
 | 
			
		||||
        #We copy the original list
 | 
			
		||||
        ies = list(self._ies)
 | 
			
		||||
 | 
			
		||||
        if ie_name is not None:
 | 
			
		||||
            #We put in the first place the given info extractor
 | 
			
		||||
            first_ie = get_info_extractor(ie_name)()
 | 
			
		||||
            first_ie.set_downloader(self)
 | 
			
		||||
            ies.insert(0, first_ie)
 | 
			
		||||
 | 
			
		||||
        for ie in ies:
 | 
			
		||||
            # Go to next InfoExtractor if not suitable
 | 
			
		||||
            if not ie.suitable(url):
 | 
			
		||||
                continue
 | 
			
		||||
 | 
			
		||||
            # Warn if the _WORKING attribute is False
 | 
			
		||||
            if not ie.working():
 | 
			
		||||
                self.to_stderr(u'WARNING: the program functionality for this site has been marked as broken, '
 | 
			
		||||
                               u'and will probably not work. If you want to go on, use the -i option.')
 | 
			
		||||
 | 
			
		||||
            # Suitable InfoExtractor found
 | 
			
		||||
            suitable_found = True
 | 
			
		||||
 | 
			
		||||
            # Extract information from URL and process it
 | 
			
		||||
            try:
 | 
			
		||||
                ie_results = ie.extract(url)
 | 
			
		||||
                if ie_results is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
 | 
			
		||||
                    break
 | 
			
		||||
                results = []
 | 
			
		||||
                for ie_result in ie_results:
 | 
			
		||||
                    if not 'extractor' in ie_result:
 | 
			
		||||
                        #The extractor has not been set yet; record the IE that found the result
 | 
			
		||||
                        ie_result['extractor'] = ie.IE_NAME
 | 
			
		||||
                    results.append(self.process_ie_result(ie_result, download))
 | 
			
		||||
                return results
 | 
			
		||||
            except ExtractorError as de: # An error we somewhat expected
 | 
			
		||||
                self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
 | 
			
		||||
                break
 | 
			
		||||
            except Exception as e:
 | 
			
		||||
                if self.params.get('ignoreerrors', False):
 | 
			
		||||
                    self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc()))
 | 
			
		||||
                    break
 | 
			
		||||
                else:
 | 
			
		||||
                    raise
 | 
			
		||||
        if not suitable_found:
 | 
			
		||||
            self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
 | 
			
		||||
        
 | 
			
		||||
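
In other words, callers can use extract_info either to download immediately or just to inspect metadata. A sketch, assuming `fd` is a configured FileDownloader and that 'Youtube' is a valid key for get_info_extractor:

    # Only extract metadata, do not download, and try a specific extractor first:
    results = fd.extract_info(url, download=False, ie_name='Youtube')
    # extract_info returns None when no suitable IE was found or the extraction failed.
    for info in results or []:
        print(info['extractor'], info.get('title'))
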
    def process_ie_result(self, ie_result, download = True):
 | 
			
		||||
        """
 | 
			
		||||
        Take the result of the ie and return a list of videos.
 | 
			
		||||
        For url elements it will search the suitable ie and get the videos
 | 
			
		||||
        For playlist elements it will process each of the elements of the 'entries' key
 | 
			
		||||
        
 | 
			
		||||
        It will also download the videos if 'download'.
 | 
			
		||||
        """
 | 
			
		||||
        result_type = ie_result.get('_type', 'video') #If not given we suppose it's a video, support the default old system
 | 
			
		||||
        if result_type == 'video':
 | 
			
		||||
            if 'playlist' not in ie_result:
 | 
			
		||||
                #It isn't part of a playlist
 | 
			
		||||
                ie_result['playlist'] = None
 | 
			
		||||
                ie_result['playlist_index'] = None
 | 
			
		||||
            if download:
 | 
			
		||||
                #Do the download:
 | 
			
		||||
                self.process_info(ie_result)
 | 
			
		||||
            return ie_result
 | 
			
		||||
        elif result_type == 'url':
 | 
			
		||||
            #We get the video pointed by the url
 | 
			
		||||
            result = self.extract_info(ie_result['url'], download, ie_name = ie_result['ie_key'])[0]
 | 
			
		||||
            return result
 | 
			
		||||
        elif result_type == 'playlist':
 | 
			
		||||
            #We process each entry in the playlist
 | 
			
		||||
            playlist = ie_result.get('title', None) or ie_result.get('id', None)
 | 
			
		||||
            self.to_screen(u'[download] Downloading playlist: %s'  % playlist)
 | 
			
		||||
 | 
			
		||||
            playlist_results = []
 | 
			
		||||
 | 
			
		||||
            n_all_entries = len(ie_result['entries'])
 | 
			
		||||
            playliststart = self.params.get('playliststart', 1) - 1
 | 
			
		||||
            playlistend = self.params.get('playlistend', -1)
 | 
			
		||||
 | 
			
		||||
            if playlistend == -1:
 | 
			
		||||
                entries = ie_result['entries'][playliststart:]
 | 
			
		||||
            else:
 | 
			
		||||
                entries = ie_result['entries'][playliststart:playlistend]
 | 
			
		||||
 | 
			
		||||
            n_entries = len(entries)
 | 
			
		||||
 | 
			
		||||
            self.to_screen(u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
 | 
			
		||||
                (ie_result['extractor'], playlist, n_all_entries, n_entries))
 | 
			
		||||
 | 
			
		||||
            for i,entry in enumerate(entries,1):
 | 
			
		||||
                self.to_screen(u'[download] Downloading video #%s of %s' %(i, n_entries))
 | 
			
		||||
                entry_result = self.process_ie_result(entry, False)
 | 
			
		||||
                entry_result['playlist'] = playlist
 | 
			
		||||
                entry_result['playlist_index'] = i + playliststart
 | 
			
		||||
                #We must do the download here to correctly set the 'playlist' key
 | 
			
		||||
                if download:
 | 
			
		||||
                    self.process_info(entry_result)
 | 
			
		||||
                playlist_results.append(entry_result)
 | 
			
		||||
            result = ie_result.copy()
 | 
			
		||||
            result['entries'] = playlist_results
 | 
			
		||||
            return result
 | 
			
		||||
 | 
			
		||||
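
The playliststart/playlistend handling above is plain list slicing on 1-based options. A sketch with illustrative stand-in entries:

    entries = ['e1', 'e2', 'e3', 'e4', 'e5']        # stand-ins for ie_result['entries']
    playliststart = 2 - 1                           # --playlist-start 2 (the option is 1-based)
    playlistend = 4                                 # --playlist-end 4
    selected = entries[playliststart:playlistend]   # ['e2', 'e3', 'e4']
    # Their playlist_index values become i + playliststart for i = 1, 2, 3 -> 2, 3, 4
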
    def process_info(self, info_dict):
 | 
			
		||||
        """Process a single dictionary returned by an InfoExtractor."""
 | 
			
		||||
 | 
			
		||||
        #We increment the download count here to match the previous behaviour.
 | 
			
		||||
        self.increment_downloads()
 | 
			
		||||
        
 | 
			
		||||
        info_dict['fulltitle'] = info_dict['title']
 | 
			
		||||
        if len(info_dict['title']) > 200:
 | 
			
		||||
            info_dict['title'] = info_dict['title'][:197] + u'...'
 | 
			
		||||
 | 
			
		||||
        # Keep for backwards compatibility
 | 
			
		||||
        info_dict['stitle'] = info_dict['title']
 | 
			
		||||
 | 
			
		||||
        if not 'format' in info_dict:
 | 
			
		||||
            info_dict['format'] = info_dict['ext']
 | 
			
		||||
 | 
			
		||||
        reason = self._match_entry(info_dict)
 | 
			
		||||
        if reason is not None:
 | 
			
		||||
            self.to_screen(u'[download] ' + reason)
 | 
			
		||||
            return
 | 
			
		||||
 | 
			
		||||
        max_downloads = self.params.get('max_downloads')
 | 
			
		||||
        if max_downloads is not None:
 | 
			
		||||
            if self._num_downloads > int(max_downloads):
 | 
			
		||||
                raise MaxDownloadsReached()
 | 
			
		||||
 | 
			
		||||
        filename = self.prepare_filename(info_dict)
 | 
			
		||||
 | 
			
		||||
        # Forced printings
 | 
			
		||||
        if self.params.get('forcetitle', False):
 | 
			
		||||
            compat_print(info_dict['title'])
 | 
			
		||||
        if self.params.get('forceurl', False):
 | 
			
		||||
            compat_print(info_dict['url'])
 | 
			
		||||
        if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
 | 
			
		||||
            compat_print(info_dict['thumbnail'])
 | 
			
		||||
        if self.params.get('forcedescription', False) and 'description' in info_dict:
 | 
			
		||||
            compat_print(info_dict['description'])
 | 
			
		||||
        if self.params.get('forcefilename', False) and filename is not None:
 | 
			
		||||
            compat_print(filename)
 | 
			
		||||
        if self.params.get('forceformat', False):
 | 
			
		||||
            compat_print(info_dict['format'])
 | 
			
		||||
 | 
			
		||||
        # Do nothing else if in simulate mode
 | 
			
		||||
        if self.params.get('simulate', False):
 | 
			
		||||
            return
 | 
			
		||||
 | 
			
		||||
        if filename is None:
 | 
			
		||||
            return
 | 
			
		||||
 | 
			
		||||
        try:
 | 
			
		||||
            dn = os.path.dirname(encodeFilename(filename))
 | 
			
		||||
            if dn != '' and not os.path.exists(dn): # dn is already encoded
 | 
			
		||||
                os.makedirs(dn)
 | 
			
		||||
        except (OSError, IOError) as err:
 | 
			
		||||
            self.report_error(u'unable to create directory ' + compat_str(err))
 | 
			
		||||
            return
 | 
			
		||||
 | 
			
		||||
        if self.params.get('writedescription', False):
 | 
			
		||||
            try:
 | 
			
		||||
                descfn = filename + u'.description'
 | 
			
		||||
                self.report_writedescription(descfn)
 | 
			
		||||
                with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
 | 
			
		||||
                    descfile.write(info_dict['description'])
 | 
			
		||||
            except (OSError, IOError):
 | 
			
		||||
                self.report_error(u'Cannot write description file ' + descfn)
 | 
			
		||||
                return
 | 
			
		||||
 | 
			
		||||
        if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
 | 
			
		||||
            # subtitles download errors are already managed as troubles in relevant IE
 | 
			
		||||
            # that way it will silently go on when used with unsupporting IE
 | 
			
		||||
            subtitle = info_dict['subtitles'][0]
 | 
			
		||||
            (sub_error, sub_lang, sub) = subtitle
 | 
			
		||||
            sub_format = self.params.get('subtitlesformat')
 | 
			
		||||
            if sub_error:
 | 
			
		||||
                self.report_warning("Some error while getting the subtitles")
 | 
			
		||||
            else:
 | 
			
		||||
                try:
 | 
			
		||||
                    sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
 | 
			
		||||
                    self.report_writesubtitles(sub_filename)
 | 
			
		||||
                    with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
 | 
			
		||||
                        subfile.write(sub)
 | 
			
		||||
                except (OSError, IOError):
 | 
			
		||||
                    self.report_error(u'Cannot write subtitles file ' + sub_filename)
 | 
			
		||||
                    return
 | 
			
		||||
            if self.params.get('onlysubtitles', False):
 | 
			
		||||
                return 
 | 
			
		||||
 | 
			
		||||
        if self.params.get('allsubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
 | 
			
		||||
            subtitles = info_dict['subtitles']
 | 
			
		||||
            sub_format = self.params.get('subtitlesformat')
 | 
			
		||||
            for subtitle in subtitles:
 | 
			
		||||
                (sub_error, sub_lang, sub) = subtitle
 | 
			
		||||
                if sub_error:
 | 
			
		||||
                    self.report_warning("Some error while getting the subtitles")
 | 
			
		||||
                else:
 | 
			
		||||
                    try:
 | 
			
		||||
                        sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
 | 
			
		||||
                        self.report_writesubtitles(sub_filename)
 | 
			
		||||
                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
 | 
			
		||||
                            subfile.write(sub)
 | 
			
		||||
                    except (OSError, IOError):
 | 
			
		||||
                        self.report_error(u'Cannot write subtitles file ' + sub_filename)
 | 
			
		||||
                        return
 | 
			
		||||
            if self.params.get('onlysubtitles', False):
 | 
			
		||||
                return 
 | 
			
		||||
 | 
			
		||||
        if self.params.get('writeinfojson', False):
 | 
			
		||||
            infofn = filename + u'.info.json'
 | 
			
		||||
            self.report_writeinfojson(infofn)
 | 
			
		||||
            try:
 | 
			
		||||
                json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle'])
 | 
			
		||||
                write_json_file(json_info_dict, encodeFilename(infofn))
 | 
			
		||||
            except (OSError, IOError):
 | 
			
		||||
                self.report_error(u'Cannot write metadata to JSON file ' + infofn)
 | 
			
		||||
                return
 | 
			
		||||
 | 
			
		||||
        if not self.params.get('skip_download', False):
 | 
			
		||||
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
 | 
			
		||||
                success = True
 | 
			
		||||
            else:
 | 
			
		||||
                try:
 | 
			
		||||
                    success = self._do_download(filename, info_dict)
 | 
			
		||||
                except (OSError, IOError) as err:
 | 
			
		||||
                    raise UnavailableVideoError()
 | 
			
		||||
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
 | 
			
		||||
                    self.report_error(u'unable to download video data: %s' % str(err))
 | 
			
		||||
                    return
 | 
			
		||||
                except (ContentTooShortError, ) as err:
 | 
			
		||||
                    self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
 | 
			
		||||
                    return
 | 
			
		||||
 | 
			
		||||
            if success:
 | 
			
		||||
                try:
 | 
			
		||||
                    self.post_process(filename, info_dict)
 | 
			
		||||
                except (PostProcessingError) as err:
 | 
			
		||||
                    self.report_error(u'postprocessing: %s' % str(err))
 | 
			
		||||
                    return
 | 
			
		||||
 | 
			
		||||
    def download(self, url_list):
        """Download a given list of URLs."""
        if len(url_list) > 1 and self.fixed_template():
            raise SameFileError(self.params['outtmpl'])

        for url in url_list:
            try:
                #It also downloads the videos
                videos = self.extract_info(url)
            except UnavailableVideoError:
                self.trouble(u'\nERROR: unable to download video')
            except MaxDownloadsReached:
                self.to_screen(u'[info] Maximum number of downloaded files reached.')
                raise

        return self._download_retcode
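
For orientation, this is roughly how the class is driven from the outside (a sketch; the exact params mirror the options parsed in __init__.py, and gen_extractors comes from InfoExtractors.py):

    from youtube_dl.FileDownloader import FileDownloader
    from youtube_dl.InfoExtractors import gen_extractors

    fd = FileDownloader({
        'outtmpl': u'%(title)s-%(id)s.%(ext)s',   # output template, see prepare_filename()
        'ignoreerrors': True,                     # keep going when one URL fails
        'ratelimit': 500 * 1024,                  # bytes per second, see slow_down()
    })
    for ie in gen_extractors():
        fd.add_info_extractor(ie)
    retcode = fd.download([u'http://www.youtube.com/watch?v=BaW_jenozKc'])
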
    def post_process(self, filename, ie_info):
 | 
			
		||||
        """Run all the postprocessors on the given file."""
 | 
			
		||||
        info = dict(ie_info)
 | 
			
		||||
        info['filepath'] = filename
 | 
			
		||||
        keep_video = None
 | 
			
		||||
        for pp in self._pps:
 | 
			
		||||
            try:
 | 
			
		||||
                keep_video_wish,new_info = pp.run(info)
 | 
			
		||||
                if keep_video_wish is not None:
 | 
			
		||||
                    if keep_video_wish:
 | 
			
		||||
                        keep_video = keep_video_wish
 | 
			
		||||
                    elif keep_video is None:
 | 
			
		||||
                        # No clear decision yet, let IE decide
 | 
			
		||||
                        keep_video = keep_video_wish
 | 
			
		||||
            except PostProcessingError as e:
 | 
			
		||||
                self.to_stderr(u'ERROR: ' + e.msg)
 | 
			
		||||
        if keep_video is False and not self.params.get('keepvideo', False):
 | 
			
		||||
            try:
 | 
			
		||||
                self.to_screen(u'Deleting original file %s (pass -k to keep)' % filename)
 | 
			
		||||
                os.remove(encodeFilename(filename))
 | 
			
		||||
            except (IOError, OSError):
 | 
			
		||||
                self.report_warning(u'Unable to remove downloaded video file')
 | 
			
		||||
 | 
			
		||||
    def _download_with_rtmpdump(self, filename, url, player_url, page_url, play_path):
 | 
			
		||||
        self.report_destination(filename)
 | 
			
		||||
        tmpfilename = self.temp_name(filename)
 | 
			
		||||
 | 
			
		||||
        # Check for rtmpdump first
 | 
			
		||||
        try:
 | 
			
		||||
            subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
 | 
			
		||||
        except (OSError, IOError):
 | 
			
		||||
            self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
 | 
			
		||||
            return False
 | 
			
		||||
 | 
			
		||||
        # Download using rtmpdump. rtmpdump returns exit code 2 when
 | 
			
		||||
        # the connection was interrupted and resuming appears to be
 | 
			
		||||
        # possible. This is part of rtmpdump's normal usage, AFAIK.
 | 
			
		||||
        basic_args = ['rtmpdump', '-q', '-r', url, '-o', tmpfilename]
 | 
			
		||||
        if player_url is not None:
 | 
			
		||||
            basic_args += ['-W', player_url]
 | 
			
		||||
        if page_url is not None:
 | 
			
		||||
            basic_args += ['--pageUrl', page_url]
 | 
			
		||||
        if play_path is not None:
 | 
			
		||||
            basic_args += ['-y', play_path]
 | 
			
		||||
        args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)]
 | 
			
		||||
        if self.params.get('verbose', False):
 | 
			
		||||
            try:
 | 
			
		||||
                import pipes
 | 
			
		||||
                shell_quote = lambda args: ' '.join(map(pipes.quote, args))
 | 
			
		||||
            except ImportError:
 | 
			
		||||
                shell_quote = repr
 | 
			
		||||
            self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args))
 | 
			
		||||
        retval = subprocess.call(args)
 | 
			
		||||
        while retval == 2 or retval == 1:
 | 
			
		||||
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
 | 
			
		||||
            self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
 | 
			
		||||
            time.sleep(5.0) # This seems to be needed
 | 
			
		||||
            retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
 | 
			
		||||
            cursize = os.path.getsize(encodeFilename(tmpfilename))
 | 
			
		||||
            if prevsize == cursize and retval == 1:
 | 
			
		||||
                break
 | 
			
		||||
            # Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
 | 
			
		||||
            if prevsize == cursize and retval == 2 and cursize > 1024:
 | 
			
		||||
                self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
 | 
			
		||||
                retval = 0
 | 
			
		||||
                break
 | 
			
		||||
        if retval == 0:
 | 
			
		||||
            fsize = os.path.getsize(encodeFilename(tmpfilename))
 | 
			
		||||
            self.to_screen(u'\r[rtmpdump] %s bytes' % fsize)
 | 
			
		||||
            self.try_rename(tmpfilename, filename)
 | 
			
		||||
            self._hook_progress({
 | 
			
		||||
                'downloaded_bytes': fsize,
 | 
			
		||||
                'total_bytes': fsize,
 | 
			
		||||
                'filename': filename,
 | 
			
		||||
                'status': 'finished',
 | 
			
		||||
            })
 | 
			
		||||
            return True
 | 
			
		||||
        else:
 | 
			
		||||
            self.to_stderr(u"\n")
 | 
			
		||||
            self.report_error(u'rtmpdump exited with code %d' % retval)
 | 
			
		||||
            return False
 | 
			
		||||
 | 
			
		||||
    def _do_download(self, filename, info_dict):
 | 
			
		||||
        url = info_dict['url']
 | 
			
		||||
 | 
			
		||||
        # Check file already present
 | 
			
		||||
        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
 | 
			
		||||
            self.report_file_already_downloaded(filename)
 | 
			
		||||
            self._hook_progress({
 | 
			
		||||
                'filename': filename,
 | 
			
		||||
                'status': 'finished',
 | 
			
		||||
            })
 | 
			
		||||
            return True
 | 
			
		||||
 | 
			
		||||
        # Attempt to download using rtmpdump
 | 
			
		||||
        if url.startswith('rtmp'):
 | 
			
		||||
            return self._download_with_rtmpdump(filename, url,
 | 
			
		||||
                                                info_dict.get('player_url', None),
 | 
			
		||||
                                                info_dict.get('page_url', None),
 | 
			
		||||
                                                info_dict.get('play_path', None))
 | 
			
		||||
 | 
			
		||||
        tmpfilename = self.temp_name(filename)
 | 
			
		||||
        stream = None
 | 
			
		||||
 | 
			
		||||
        # Do not include the Accept-Encoding header
 | 
			
		||||
        headers = {'Youtubedl-no-compression': 'True'}
 | 
			
		||||
        if 'user_agent' in info_dict:
 | 
			
		||||
            headers['Youtubedl-user-agent'] = info_dict['user_agent']
 | 
			
		||||
        basic_request = compat_urllib_request.Request(url, None, headers)
 | 
			
		||||
        request = compat_urllib_request.Request(url, None, headers)
 | 
			
		||||
 | 
			
		||||
        if self.params.get('test', False):
 | 
			
		||||
            request.add_header('Range','bytes=0-10240')
 | 
			
		||||
 | 
			
		||||
        # Establish possible resume length
 | 
			
		||||
        if os.path.isfile(encodeFilename(tmpfilename)):
 | 
			
		||||
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
 | 
			
		||||
        else:
 | 
			
		||||
            resume_len = 0
 | 
			
		||||
 | 
			
		||||
        open_mode = 'wb'
 | 
			
		||||
        if resume_len != 0:
 | 
			
		||||
            if self.params.get('continuedl', False):
 | 
			
		||||
                self.report_resuming_byte(resume_len)
 | 
			
		||||
                request.add_header('Range','bytes=%d-' % resume_len)
 | 
			
		||||
                open_mode = 'ab'
 | 
			
		||||
            else:
 | 
			
		||||
                resume_len = 0
 | 
			
		||||
 | 
			
		||||
        count = 0
 | 
			
		||||
        retries = self.params.get('retries', 0)
 | 
			
		||||
        while count <= retries:
 | 
			
		||||
            # Establish connection
 | 
			
		||||
            try:
 | 
			
		||||
                if count == 0 and 'urlhandle' in info_dict:
 | 
			
		||||
                    data = info_dict['urlhandle']
 | 
			
		||||
                data = compat_urllib_request.urlopen(request)
 | 
			
		||||
                break
 | 
			
		||||
            except (compat_urllib_error.HTTPError, ) as err:
 | 
			
		||||
                if (err.code < 500 or err.code >= 600) and err.code != 416:
 | 
			
		||||
                    # Unexpected HTTP error
 | 
			
		||||
                    raise
 | 
			
		||||
                elif err.code == 416:
 | 
			
		||||
                    # Unable to resume (requested range not satisfiable)
 | 
			
		||||
                    try:
 | 
			
		||||
                        # Open the connection again without the range header
 | 
			
		||||
                        data = compat_urllib_request.urlopen(basic_request)
 | 
			
		||||
                        content_length = data.info()['Content-Length']
 | 
			
		||||
                    except (compat_urllib_error.HTTPError, ) as err:
 | 
			
		||||
                        if err.code < 500 or err.code >= 600:
 | 
			
		||||
                            raise
 | 
			
		||||
                    else:
 | 
			
		||||
                        # Examine the reported length
 | 
			
		||||
                        if (content_length is not None and
 | 
			
		||||
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
 | 
			
		||||
                            # The file had already been fully downloaded.
 | 
			
		||||
                            # Explanation to the above condition: in issue #175 it was revealed that
 | 
			
		||||
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
 | 
			
		||||
                            # changing the file size slightly and causing problems for some users. So
 | 
			
		||||
                            # I decided to implement a suggested change and consider the file
 | 
			
		||||
                            # completely downloaded if the file size differs less than 100 bytes from
 | 
			
		||||
                            # the one in the hard drive.
 | 
			
		||||
                            self.report_file_already_downloaded(filename)
 | 
			
		||||
                            self.try_rename(tmpfilename, filename)
 | 
			
		||||
                            self._hook_progress({
 | 
			
		||||
                                'filename': filename,
 | 
			
		||||
                                'status': 'finished',
 | 
			
		||||
                            })
 | 
			
		||||
                            return True
 | 
			
		||||
                        else:
 | 
			
		||||
                            # The length does not match, we start the download over
 | 
			
		||||
                            self.report_unable_to_resume()
 | 
			
		||||
                            open_mode = 'wb'
 | 
			
		||||
                            break
 | 
			
		||||
            # Retry
 | 
			
		||||
            count += 1
 | 
			
		||||
            if count <= retries:
 | 
			
		||||
                self.report_retry(count, retries)
 | 
			
		||||
 | 
			
		||||
        if count > retries:
 | 
			
		||||
            self.report_error(u'giving up after %s retries' % retries)
 | 
			
		||||
            return False
 | 
			
		||||
 | 
			
		||||
        data_len = data.info().get('Content-length', None)
 | 
			
		||||
        if data_len is not None:
 | 
			
		||||
            data_len = int(data_len) + resume_len
 | 
			
		||||
            min_data_len = self.params.get("min_filesize", None)
 | 
			
		||||
            max_data_len =  self.params.get("max_filesize", None)
 | 
			
		||||
            if min_data_len is not None and data_len < min_data_len:
 | 
			
		||||
                self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
 | 
			
		||||
                return False
 | 
			
		||||
            if max_data_len is not None and data_len > max_data_len:
 | 
			
		||||
                self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
 | 
			
		||||
                return False
 | 
			
		||||
 | 
			
		||||
        data_len_str = self.format_bytes(data_len)
 | 
			
		||||
        byte_counter = 0 + resume_len
 | 
			
		||||
        block_size = self.params.get('buffersize', 1024)
 | 
			
		||||
        start = time.time()
 | 
			
		||||
        while True:
 | 
			
		||||
            # Download and write
 | 
			
		||||
            before = time.time()
 | 
			
		||||
            data_block = data.read(block_size)
 | 
			
		||||
            after = time.time()
 | 
			
		||||
            if len(data_block) == 0:
 | 
			
		||||
                break
 | 
			
		||||
            byte_counter += len(data_block)
 | 
			
		||||
 | 
			
		||||
            # Open file just in time
 | 
			
		||||
            if stream is None:
 | 
			
		||||
                try:
 | 
			
		||||
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
 | 
			
		||||
                    assert stream is not None
 | 
			
		||||
                    filename = self.undo_temp_name(tmpfilename)
 | 
			
		||||
                    self.report_destination(filename)
 | 
			
		||||
                except (OSError, IOError) as err:
 | 
			
		||||
                    self.report_error(u'unable to open for writing: %s' % str(err))
 | 
			
		||||
                    return False
 | 
			
		||||
            try:
 | 
			
		||||
                stream.write(data_block)
 | 
			
		||||
            except (IOError, OSError) as err:
 | 
			
		||||
                self.to_stderr(u"\n")
 | 
			
		||||
                self.report_error(u'unable to write data: %s' % str(err))
 | 
			
		||||
                return False
 | 
			
		||||
            if not self.params.get('noresizebuffer', False):
 | 
			
		||||
                block_size = self.best_block_size(after - before, len(data_block))
 | 
			
		||||
 | 
			
		||||
            # Progress message
 | 
			
		||||
            speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
 | 
			
		||||
            if data_len is None:
 | 
			
		||||
                self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA')
 | 
			
		||||
            else:
 | 
			
		||||
                percent_str = self.calc_percent(byte_counter, data_len)
 | 
			
		||||
                eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
 | 
			
		||||
                self.report_progress(percent_str, data_len_str, speed_str, eta_str)
 | 
			
		||||
 | 
			
		||||
            self._hook_progress({
 | 
			
		||||
                'downloaded_bytes': byte_counter,
 | 
			
		||||
                'total_bytes': data_len,
 | 
			
		||||
                'tmpfilename': tmpfilename,
 | 
			
		||||
                'filename': filename,
 | 
			
		||||
                'status': 'downloading',
 | 
			
		||||
            })
 | 
			
		||||
 | 
			
		||||
            # Apply rate limit
 | 
			
		||||
            self.slow_down(start, byte_counter - resume_len)
 | 
			
		||||
 | 
			
		||||
        if stream is None:
 | 
			
		||||
            self.to_stderr(u"\n")
 | 
			
		||||
            self.report_error(u'Did not get any data blocks')
 | 
			
		||||
            return False
 | 
			
		||||
        stream.close()
 | 
			
		||||
        self.report_finish()
 | 
			
		||||
        if data_len is not None and byte_counter != data_len:
 | 
			
		||||
            raise ContentTooShortError(byte_counter, int(data_len))
 | 
			
		||||
        self.try_rename(tmpfilename, filename)
 | 
			
		||||
 | 
			
		||||
        # Update file modification time
 | 
			
		||||
        if self.params.get('updatetime', True):
 | 
			
		||||
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
 | 
			
		||||
 | 
			
		||||
        self._hook_progress({
 | 
			
		||||
            'downloaded_bytes': byte_counter,
 | 
			
		||||
            'total_bytes': byte_counter,
 | 
			
		||||
            'filename': filename,
 | 
			
		||||
            'status': 'finished',
 | 
			
		||||
        })
 | 
			
		||||
 | 
			
		||||
        return True
 | 
			
		||||
 | 
			
		||||
    def _hook_progress(self, status):
 | 
			
		||||
        for ph in self._progress_hooks:
 | 
			
		||||
            ph(status)
 | 
			
		||||
 | 
			
		||||
    def add_progress_hook(self, ph):
 | 
			
		||||
        """ ph gets called on download progress, with a dictionary with the entries
 | 
			
		||||
        * filename: The final filename
 | 
			
		||||
        * status: One of "downloading" and "finished"
 | 
			
		||||
 | 
			
		||||
        It can also have some of the following entries:
 | 
			
		||||
 | 
			
		||||
        * downloaded_bytes: Bytes on disk
 | 
			
		||||
        * total_bytes: Total bytes, None if unknown
 | 
			
		||||
        * tmpfilename: The filename we're currently writing to
 | 
			
		||||
 | 
			
		||||
        Hooks are guaranteed to be called at least once (with status "finished")
 | 
			
		||||
        if the download is successful.
 | 
			
		||||
        """
 | 
			
		||||
        self._progress_hooks.append(ph)
 | 
			
		||||
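
For example, a minimal hook that logs progress can be registered like this (a sketch; `fd` is a FileDownloader instance and the keys are the ones listed in the docstring above):

    def my_hook(status):
        if status['status'] == 'downloading':
            print('%s of %s bytes' % (status.get('downloaded_bytes'), status.get('total_bytes')))
        elif status['status'] == 'finished':
            print('Done downloading %s' % status['filename'])

    fd.add_progress_hook(my_hook)
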
							
								
								
									
youtube_dl/InfoExtractors.py  (executable file, 4389 lines)
	File diff suppressed because it is too large.

youtube_dl/PostProcessor.py  (new file, 232 lines)
@@ -0,0 +1,232 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
# -*- coding: utf-8 -*-
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import subprocess
 | 
			
		||||
import sys
 | 
			
		||||
import time
 | 
			
		||||
 | 
			
		||||
from .utils import *
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class PostProcessor(object):
 | 
			
		||||
    """Post Processor class.
 | 
			
		||||
 | 
			
		||||
    PostProcessor objects can be added to downloaders with their
 | 
			
		||||
    add_post_processor() method. When the downloader has finished a
 | 
			
		||||
    successful download, it will take its internal chain of PostProcessors
 | 
			
		||||
    and start calling the run() method on each one of them, first with
 | 
			
		||||
    an initial argument and then with the returned value of the previous
 | 
			
		||||
    PostProcessor.
 | 
			
		||||
 | 
			
		||||
    The chain will be stopped if one of them ever returns None or the end
 | 
			
		||||
    of the chain is reached.
 | 
			
		||||
 | 
			
		||||
    PostProcessor objects follow a "mutual registration" process similar
 | 
			
		||||
    to InfoExtractor objects.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    _downloader = None
 | 
			
		||||
 | 
			
		||||
    def __init__(self, downloader=None):
 | 
			
		||||
        self._downloader = downloader
 | 
			
		||||
 | 
			
		||||
    def set_downloader(self, downloader):
 | 
			
		||||
        """Sets the downloader for this PP."""
 | 
			
		||||
        self._downloader = downloader
 | 
			
		||||
 | 
			
		||||
    def run(self, information):
 | 
			
		||||
        """Run the PostProcessor.
 | 
			
		||||
 | 
			
		||||
        The "information" argument is a dictionary like the ones
 | 
			
		||||
        composed by InfoExtractors. The only difference is that this
 | 
			
		||||
        one has an extra field called "filepath" that points to the
 | 
			
		||||
        downloaded file.
 | 
			
		||||
 | 
			
		||||
        This method returns a tuple, the first element of which describes
 | 
			
		||||
        whether the original file should be kept (i.e. not deleted - None for
 | 
			
		||||
        no preference), and the second of which is the updated information.
 | 
			
		||||
 | 
			
		||||
        In addition, this method may raise a PostProcessingError
 | 
			
		||||
        exception if post processing fails.
 | 
			
		||||
        """
 | 
			
		||||
        return None, information # by default, keep file and do nothing
 | 
			
		||||
 | 
			
		||||
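
To make the contract concrete, a user-defined processor only has to subclass PostProcessor and override run(). A sketch, not part of this file, with a made-up renaming scheme for illustration:

    class RenameToTitlePP(PostProcessor):
        """Renames the downloaded file to '<title>.<ext>' and keeps it."""
        def run(self, information):
            old_path = information['filepath']
            new_path = information['title'] + u'.' + information['ext']
            os.rename(old_path, new_path)
            information['filepath'] = new_path
            return True, information   # True: keep the (renamed) file

    # fd.add_post_processor(RenameToTitlePP())   # fd is a FileDownloader instance
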
class FFmpegPostProcessorError(PostProcessingError):
 | 
			
		||||
    pass
 | 
			
		||||
 | 
			
		||||
class AudioConversionError(PostProcessingError):
 | 
			
		||||
    pass
 | 
			
		||||
 | 
			
		||||
class FFmpegPostProcessor(PostProcessor):
 | 
			
		||||
    def __init__(self,downloader=None):
 | 
			
		||||
        PostProcessor.__init__(self, downloader)
 | 
			
		||||
        self._exes = self.detect_executables()
 | 
			
		||||
 | 
			
		||||
    @staticmethod
 | 
			
		||||
    def detect_executables():
 | 
			
		||||
        def executable(exe):
 | 
			
		||||
            try:
 | 
			
		||||
                subprocess.Popen([exe, '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
 | 
			
		||||
            except OSError:
 | 
			
		||||
                return False
 | 
			
		||||
            return exe
 | 
			
		||||
        programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
 | 
			
		||||
        return dict((program, executable(program)) for program in programs)
 | 
			
		||||
 | 
			
		||||
    def run_ffmpeg(self, path, out_path, opts):
 | 
			
		||||
        if not self._exes['ffmpeg'] and not self._exes['avconv']:
 | 
			
		||||
            raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.')
 | 
			
		||||
        cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y', '-i', encodeFilename(path)]
 | 
			
		||||
               + opts +
 | 
			
		||||
               [encodeFilename(self._ffmpeg_filename_argument(out_path))])
 | 
			
		||||
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
 | 
			
		||||
        stdout,stderr = p.communicate()
 | 
			
		||||
        if p.returncode != 0:
 | 
			
		||||
            msg = stderr.strip().split('\n')[-1]
 | 
			
		||||
            raise FFmpegPostProcessorError(msg.decode('utf-8', 'replace'))
 | 
			
		||||
 | 
			
		||||
    def _ffmpeg_filename_argument(self, fn):
 | 
			
		||||
        # ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details
 | 
			
		||||
        if fn.startswith(u'-'):
 | 
			
		||||
            return u'./' + fn
 | 
			
		||||
        return fn
 | 
			
		||||
 | 
			
		||||
class FFmpegExtractAudioPP(FFmpegPostProcessor):
 | 
			
		||||
    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
 | 
			
		||||
        FFmpegPostProcessor.__init__(self, downloader)
 | 
			
		||||
        if preferredcodec is None:
 | 
			
		||||
            preferredcodec = 'best'
 | 
			
		||||
        self._preferredcodec = preferredcodec
 | 
			
		||||
        self._preferredquality = preferredquality
 | 
			
		||||
        self._nopostoverwrites = nopostoverwrites
 | 
			
		||||
 | 
			
		||||
    def get_audio_codec(self, path):
 | 
			
		||||
        if not self._exes['ffprobe'] and not self._exes['avprobe']: return None
 | 
			
		||||
        try:
 | 
			
		||||
            cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', encodeFilename(self._ffmpeg_filename_argument(path))]
 | 
			
		||||
            handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
 | 
			
		||||
            output = handle.communicate()[0]
 | 
			
		||||
            if handle.wait() != 0:
 | 
			
		||||
                return None
 | 
			
		||||
        except (IOError, OSError):
 | 
			
		||||
            return None
 | 
			
		||||
        audio_codec = None
 | 
			
		||||
        for line in output.decode('ascii', 'ignore').split('\n'):
 | 
			
		||||
            if line.startswith('codec_name='):
 | 
			
		||||
                audio_codec = line.split('=')[1].strip()
 | 
			
		||||
            elif line.strip() == 'codec_type=audio' and audio_codec is not None:
 | 
			
		||||
                return audio_codec
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
    def run_ffmpeg(self, path, out_path, codec, more_opts):
 | 
			
		||||
        if not self._exes['ffmpeg'] and not self._exes['avconv']:
 | 
			
		||||
            raise AudioConversionError('ffmpeg or avconv not found. Please install one.')
 | 
			
		||||
        if codec is None:
 | 
			
		||||
            acodec_opts = []
 | 
			
		||||
        else:
 | 
			
		||||
            acodec_opts = ['-acodec', codec]
 | 
			
		||||
        opts = ['-vn'] + acodec_opts + more_opts
 | 
			
		||||
        try:
 | 
			
		||||
            FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
 | 
			
		||||
        except FFmpegPostProcessorError as err:
 | 
			
		||||
            raise AudioConversionError(err.message)
 | 
			
		||||
 | 
			
		||||
    def run(self, information):
 | 
			
		||||
        path = information['filepath']
 | 
			
		||||
 | 
			
		||||
        filecodec = self.get_audio_codec(path)
 | 
			
		||||
        if filecodec is None:
 | 
			
		||||
            raise PostProcessingError(u'WARNING: unable to obtain file audio codec with ffprobe')
 | 
			
		||||
 | 
			
		||||
        more_opts = []
 | 
			
		||||
        if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
 | 
			
		||||
            if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
 | 
			
		||||
                # Lossless, but in another container
 | 
			
		||||
                acodec = 'copy'
 | 
			
		||||
                extension = 'm4a'
 | 
			
		||||
                more_opts = [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
 | 
			
		||||
            elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']:
 | 
			
		||||
                # Lossless if possible
 | 
			
		||||
                acodec = 'copy'
 | 
			
		||||
                extension = filecodec
 | 
			
		||||
                if filecodec == 'aac':
 | 
			
		||||
                    more_opts = ['-f', 'adts']
 | 
			
		||||
                if filecodec == 'vorbis':
 | 
			
		||||
                    extension = 'ogg'
 | 
			
		||||
            else:
 | 
			
		||||
                # MP3 otherwise.
 | 
			
		||||
                acodec = 'libmp3lame'
 | 
			
		||||
                extension = 'mp3'
 | 
			
		||||
                more_opts = []
 | 
			
		||||
                if self._preferredquality is not None:
 | 
			
		||||
                    if int(self._preferredquality) < 10:
 | 
			
		||||
                        more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
 | 
			
		||||
                    else:
 | 
			
		||||
                        more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
 | 
			
		||||
        else:
 | 
			
		||||
            # We convert the audio (lossy)
 | 
			
		||||
            acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
 | 
			
		||||
            extension = self._preferredcodec
 | 
			
		||||
            more_opts = []
 | 
			
		||||
            if self._preferredquality is not None:
 | 
			
		||||
                if int(self._preferredquality) < 10:
 | 
			
		||||
                    more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
 | 
			
		||||
                else:
 | 
			
		||||
                    more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
 | 
			
		||||
            if self._preferredcodec == 'aac':
 | 
			
		||||
                more_opts += ['-f', 'adts']
 | 
			
		||||
            if self._preferredcodec == 'm4a':
 | 
			
		||||
                more_opts += [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
 | 
			
		||||
            if self._preferredcodec == 'vorbis':
 | 
			
		||||
                extension = 'ogg'
 | 
			
		||||
            if self._preferredcodec == 'wav':
 | 
			
		||||
                extension = 'wav'
 | 
			
		||||
                more_opts += ['-f', 'wav']
 | 
			
		||||
 | 
			
		||||
        prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups
 | 
			
		||||
        new_path = prefix + sep + extension
 | 
			
		||||
        try:
 | 
			
		||||
            if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
 | 
			
		||||
                self._downloader.to_screen(u'[youtube] Post-process file %s exists, skipping' % new_path)
 | 
			
		||||
            else:
 | 
			
		||||
                self._downloader.to_screen(u'[' + (self._exes['avconv'] and 'avconv' or 'ffmpeg') + '] Destination: ' + new_path)
 | 
			
		||||
                self.run_ffmpeg(path, new_path, acodec, more_opts)
 | 
			
		||||
        except:
 | 
			
		||||
            etype,e,tb = sys.exc_info()
 | 
			
		||||
            if isinstance(e, AudioConversionError):
 | 
			
		||||
                msg = u'audio conversion failed: ' + e.message
 | 
			
		||||
            else:
 | 
			
		||||
                msg = u'error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg')
 | 
			
		||||
            raise PostProcessingError(msg)
 | 
			
		||||
 | 
			
		||||
        # Try to update the date time for extracted audio file.
 | 
			
		||||
        if information.get('filetime') is not None:
 | 
			
		||||
            try:
 | 
			
		||||
                os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
 | 
			
		||||
            except:
 | 
			
		||||
                self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')
 | 
			
		||||
 | 
			
		||||
        information['filepath'] = new_path
 | 
			
		||||
        return False,information
 | 
			
		||||
 | 
			
		||||
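
Wiring this processor into a download goes through the downloader's post-processor chain (a sketch; `fd` is a FileDownloader instance and 'keepvideo' mirrors the -k flag handled in post_process()):

    fd.add_post_processor(FFmpegExtractAudioPP(
        preferredcodec='mp3',      # target codec, see the mapping above
        preferredquality='192',    # values >= 10 are treated as a bitrate in k
    ))
    # After a successful download, post_process() runs the chain; because
    # FFmpegExtractAudioPP.run() returns False as its first element, the original
    # video file is deleted unless 'keepvideo' is set in fd.params.
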
class FFmpegVideoConvertor(FFmpegPostProcessor):
 | 
			
		||||
    def __init__(self, downloader=None,preferedformat=None):
 | 
			
		||||
        super(FFmpegVideoConvertor, self).__init__(downloader)
 | 
			
		||||
        self._preferedformat=preferedformat
 | 
			
		||||
 | 
			
		||||
    def run(self, information):
 | 
			
		||||
        path = information['filepath']
 | 
			
		||||
        prefix, sep, ext = path.rpartition(u'.')
 | 
			
		||||
        outpath = prefix + sep + self._preferedformat
 | 
			
		||||
        if information['ext'] == self._preferedformat:
 | 
			
		||||
            self._downloader.to_screen(u'[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
 | 
			
		||||
            return True,information
 | 
			
		||||
        self._downloader.to_screen(u'['+'ffmpeg'+'] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) +outpath)
 | 
			
		||||
        self.run_ffmpeg(path, outpath, [])
 | 
			
		||||
        information['filepath'] = outpath
 | 
			
		||||
        information['format'] = self._preferedformat
 | 
			
		||||
        information['ext'] = self._preferedformat
 | 
			
		||||
        return False,information
 | 
			
		||||
							
								
								
									
youtube_dl/__init__.py  (new file, 564 lines)
@@ -0,0 +1,564 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import with_statement
from __future__ import absolute_import

__authors__  = (
    'Ricardo Garcia Gonzalez',
    'Danny Colligan',
    'Benjamin Johnson',
    'Vasyl\' Vavrychuk',
    'Witold Baryluk',
    'Paweł Paprota',
    'Gergely Imreh',
    'Rogério Brito',
    'Philipp Hagemeister',
    'Sören Schulze',
    'Kevin Ngo',
    'Ori Avtalion',
    'shizeeg',
    'Filippo Valsorda',
    'Christian Albrecht',
    'Dave Vasilevsky',
    'Jaime Marquínez Ferrándiz',
    'Jeff Crouse',
    'Osama Khalid',
    'Michael Walter',
    )

__license__ = 'Public Domain'

import getpass
import optparse
import os
import re
import shlex
import socket
import subprocess
import sys
import warnings
import platform

from .utils import *
from .update import update_self
from .version import __version__
from .FileDownloader import *
from .InfoExtractors import gen_extractors
from .PostProcessor import *

def parseOpts():
    def _readOptions(filename_bytes):
        try:
            optionf = open(filename_bytes)
        except IOError:
            return [] # silently skip if file is not present
        try:
            res = []
            for l in optionf:
                res += shlex.split(l, comments=True)
        finally:
            optionf.close()
        return res

    def _format_option_string(option):
        ''' ('-o', '--option') -> -o, --format METAVAR'''

        opts = []

        if option._short_opts:
            opts.append(option._short_opts[0])
        if option._long_opts:
            opts.append(option._long_opts[0])
        if len(opts) > 1:
            opts.insert(1, ', ')

        if option.takes_value(): opts.append(' %s' % option.metavar)

        return "".join(opts)

    def _find_term_columns():
        columns = os.environ.get('COLUMNS', None)
        if columns:
            return int(columns)

        try:
            sp = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out,err = sp.communicate()
            return int(out.split()[1])
        except:
            pass
        return None

    max_width = 80
    max_help_position = 80

    # No need to wrap help messages if we're on a wide console
    columns = _find_term_columns()
    if columns: max_width = columns

    fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
    fmt.format_option_strings = _format_option_string

    kw = {
        'version'   : __version__,
        'formatter' : fmt,
        'usage' : '%prog [options] url [url...]',
        'conflict_handler' : 'resolve',
    }

    parser = optparse.OptionParser(**kw)

    # option groups
    general        = optparse.OptionGroup(parser, 'General Options')
    selection      = optparse.OptionGroup(parser, 'Video Selection')
    authentication = optparse.OptionGroup(parser, 'Authentication Options')
    video_format   = optparse.OptionGroup(parser, 'Video Format Options')
    postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
    filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
    verbosity      = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')

    general.add_option('-h', '--help',
            action='help', help='print this help text and exit')
    general.add_option('-v', '--version',
            action='version', help='print program version and exit')
    general.add_option('-U', '--update',
            action='store_true', dest='update_self', help='update this program to latest version')
    general.add_option('-i', '--ignore-errors',
            action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
    general.add_option('-r', '--rate-limit',
            dest='ratelimit', metavar='LIMIT', help='maximum download rate (e.g. 50k or 44.6m)')
    general.add_option('-R', '--retries',
            dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
    general.add_option('--buffer-size',
            dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16k) (default is %default)', default="1024")
    general.add_option('--no-resize-buffer',
            action='store_true', dest='noresizebuffer',
            help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
    general.add_option('--dump-user-agent',
            action='store_true', dest='dump_user_agent',
            help='display the current browser identification', default=False)
    general.add_option('--user-agent',
            dest='user_agent', help='specify a custom user agent', metavar='UA')
    general.add_option('--list-extractors',
            action='store_true', dest='list_extractors',
            help='List all supported extractors and the URLs they would handle', default=False)
    general.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)

    selection.add_option('--playlist-start',
            dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is %default)', default=1)
    selection.add_option('--playlist-end',
            dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
    selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
    selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
    selection.add_option('--max-downloads', metavar='NUMBER', dest='max_downloads', help='Abort after downloading NUMBER files', default=None)
    selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
    selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)


    authentication.add_option('-u', '--username',
            dest='username', metavar='USERNAME', help='account username')
    authentication.add_option('-p', '--password',
            dest='password', metavar='PASSWORD', help='account password')
    authentication.add_option('-n', '--netrc',
            action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)


    video_format.add_option('-f', '--format',
            action='store', dest='format', metavar='FORMAT', help='video format code')
    video_format.add_option('--all-formats',
            action='store_const', dest='format', help='download all available video formats', const='all')
    video_format.add_option('--prefer-free-formats',
            action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
    video_format.add_option('--max-quality',
            action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
    video_format.add_option('-F', '--list-formats',
            action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
    video_format.add_option('--write-sub', '--write-srt',
            action='store_true', dest='writesubtitles',
            help='write subtitle file (currently youtube only)', default=False)
    video_format.add_option('--only-sub',
            action='store_true', dest='onlysubtitles',
            help='downloads only the subtitles (no video)', default=False)
    video_format.add_option('--all-subs',
            action='store_true', dest='allsubtitles',
            help='downloads all the available subtitles of the video (currently youtube only)', default=False)
    video_format.add_option('--list-subs',
            action='store_true', dest='listsubtitles',
            help='lists all available subtitles for the video (currently youtube only)', default=False)
    video_format.add_option('--sub-format',
            action='store', dest='subtitlesformat', metavar='LANG',
            help='subtitle format [srt/sbv] (default=srt) (currently youtube only)', default='srt')
    video_format.add_option('--sub-lang', '--srt-lang',
            action='store', dest='subtitleslang', metavar='LANG',
            help='language of the subtitles to download (optional) use IETF language tags like \'en\'')

    verbosity.add_option('-q', '--quiet',
            action='store_true', dest='quiet', help='activates quiet mode', default=False)
    verbosity.add_option('-s', '--simulate',
            action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
    verbosity.add_option('--skip-download',
            action='store_true', dest='skip_download', help='do not download the video', default=False)
    verbosity.add_option('-g', '--get-url',
            action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
    verbosity.add_option('-e', '--get-title',
            action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
    verbosity.add_option('--get-thumbnail',
            action='store_true', dest='getthumbnail',
            help='simulate, quiet but print thumbnail URL', default=False)
    verbosity.add_option('--get-description',
            action='store_true', dest='getdescription',
            help='simulate, quiet but print video description', default=False)
    verbosity.add_option('--get-filename',
            action='store_true', dest='getfilename',
            help='simulate, quiet but print output filename', default=False)
    verbosity.add_option('--get-format',
            action='store_true', dest='getformat',
            help='simulate, quiet but print output format', default=False)
    verbosity.add_option('--newline',
            action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
    verbosity.add_option('--no-progress',
            action='store_true', dest='noprogress', help='do not print progress bar', default=False)
    verbosity.add_option('--console-title',
            action='store_true', dest='consoletitle',
            help='display progress in console titlebar', default=False)
    verbosity.add_option('-v', '--verbose',
            action='store_true', dest='verbose', help='print various debugging information', default=False)
    verbosity.add_option('--dump-intermediate-pages',
            action='store_true', dest='dump_intermediate_pages', default=False,
            help='print downloaded pages to debug problems(very verbose)')

    filesystem.add_option('-t', '--title',
            action='store_true', dest='usetitle', help='use title in file name', default=False)
    filesystem.add_option('--id',
            action='store_true', dest='useid', help='use video ID in file name', default=False)
    filesystem.add_option('-l', '--literal',
            action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
    filesystem.add_option('-A', '--auto-number',
            action='store_true', dest='autonumber',
            help='number downloaded files starting from 00000', default=False)
    filesystem.add_option('-o', '--output',
            dest='outtmpl', metavar='TEMPLATE',
            help=('output filename template. Use %(title)s to get the title, '
                  '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
                  '%(autonumber)s to get an automatically incremented number, '
                  '%(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), '
                  '%(extractor)s for the provider (youtube, metacafe, etc), '
                  '%(id)s for the video id , %(playlist)s for the playlist the video is in, '
                  '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
                  'Use - to output to stdout. Can also be used to download to a different directory, '
                  'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
    filesystem.add_option('--autonumber-size',
            dest='autonumber_size', metavar='NUMBER',
            help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --autonumber option is given')
    filesystem.add_option('--restrict-filenames',
            action='store_true', dest='restrictfilenames',
            help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
    filesystem.add_option('-a', '--batch-file',
            dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
    filesystem.add_option('-w', '--no-overwrites',
            action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
    filesystem.add_option('-c', '--continue',
            action='store_true', dest='continue_dl', help='resume partially downloaded files', default=True)
    filesystem.add_option('--no-continue',
            action='store_false', dest='continue_dl',
            help='do not resume partially downloaded files (restart from beginning)')
    filesystem.add_option('--cookies',
            dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
    filesystem.add_option('--no-part',
            action='store_true', dest='nopart', help='do not use .part files', default=False)
    filesystem.add_option('--no-mtime',
            action='store_false', dest='updatetime',
            help='do not use the Last-modified header to set the file modification time', default=True)
    filesystem.add_option('--write-description',
            action='store_true', dest='writedescription',
            help='write video description to a .description file', default=False)
    filesystem.add_option('--write-info-json',
            action='store_true', dest='writeinfojson',
            help='write video metadata to a .info.json file', default=False)


    postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
            help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
    postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
            help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
    postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
            help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
    postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
            help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm)')
    postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
            help='keeps the video file on disk after the post-processing; the video is erased by default')
    postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
            help='do not overwrite post-processed files; the post-processed files are overwritten by default')


    parser.add_option_group(general)
    parser.add_option_group(selection)
    parser.add_option_group(filesystem)
    parser.add_option_group(verbosity)
    parser.add_option_group(video_format)
    parser.add_option_group(authentication)
    parser.add_option_group(postproc)

    xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
    if xdg_config_home:
        userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
    else:
        userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
    systemConf = _readOptions('/etc/youtube-dl.conf')
    userConf = _readOptions(userConfFile)
    commandLineConf = sys.argv[1:]
    argv = systemConf + userConf + commandLineConf
    opts, args = parser.parse_args(argv)

    if opts.verbose:
        print(u'[debug] System config: ' + repr(systemConf))
        print(u'[debug] User config: ' + repr(userConf))
        print(u'[debug] Command-line args: ' + repr(commandLineConf))

    return parser, opts, args

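A small self-contained illustration (not from the repository) of why the argv concatenation in parseOpts gives the command line precedence: optparse processes options left to right, so a later occurrence of the same option overrides an earlier one read from /etc/youtube-dl.conf or the user config file.

import optparse

parser = optparse.OptionParser()
parser.add_option('-r', '--rate-limit', dest='ratelimit')
# Simulated: the system config supplies 100k, the command line supplies 2M.
opts, args = parser.parse_args(['--rate-limit', '100k', '--rate-limit', '2M', 'URL'])
assert opts.ratelimit == '2M' and args == ['URL']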
def _real_main():
    parser, opts, args = parseOpts()

    # Open appropriate CookieJar
    if opts.cookiefile is None:
        jar = compat_cookiejar.CookieJar()
    else:
        try:
            jar = compat_cookiejar.MozillaCookieJar(opts.cookiefile)
            if os.access(opts.cookiefile, os.R_OK):
                jar.load()
        except (IOError, OSError) as err:
            if opts.verbose:
                traceback.print_exc()
            sys.stderr.write(u'ERROR: unable to open cookie file\n')
            sys.exit(101)
    # Set user agent
    if opts.user_agent is not None:
        std_headers['User-Agent'] = opts.user_agent

    # Dump user agent
    if opts.dump_user_agent:
        print(std_headers['User-Agent'])
        sys.exit(0)

    # Batch file verification
    batchurls = []
    if opts.batchfile is not None:
        try:
            if opts.batchfile == '-':
                batchfd = sys.stdin
            else:
                batchfd = open(opts.batchfile, 'r')
            batchurls = batchfd.readlines()
            batchurls = [x.strip() for x in batchurls]
            batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
        except IOError:
            sys.exit(u'ERROR: batch file could not be read')
    all_urls = batchurls + args
    all_urls = [url.strip() for url in all_urls]

    # General configuration
    cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
    proxy_handler = compat_urllib_request.ProxyHandler()
    opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
    compat_urllib_request.install_opener(opener)
    socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)

    extractors = gen_extractors()

    if opts.list_extractors:
        for ie in extractors:
            print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
            matchedUrls = [url for url in all_urls if ie.suitable(url)]
            all_urls = [url for url in all_urls if url not in matchedUrls]
            for mu in matchedUrls:
                print(u'  ' + mu)
        sys.exit(0)

    # Conflicting, missing and erroneous options
    if opts.usenetrc and (opts.username is not None or opts.password is not None):
        parser.error(u'using .netrc conflicts with giving username/password')
    if opts.password is not None and opts.username is None:
        parser.error(u'account username missing')
    if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
        parser.error(u'using output template conflicts with using title, video ID or auto number')
    if opts.usetitle and opts.useid:
        parser.error(u'using title conflicts with using video ID')
    if opts.username is not None and opts.password is None:
        opts.password = getpass.getpass(u'Type account password and press return:')
    if opts.ratelimit is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
        if numeric_limit is None:
            parser.error(u'invalid rate limit specified')
        opts.ratelimit = numeric_limit
    if opts.min_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
        if numeric_limit is None:
            parser.error(u'invalid min_filesize specified')
        opts.min_filesize = numeric_limit
    if opts.max_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
        if numeric_limit is None:
            parser.error(u'invalid max_filesize specified')
        opts.max_filesize = numeric_limit
    if opts.retries is not None:
        try:
            opts.retries = int(opts.retries)
        except (TypeError, ValueError) as err:
            parser.error(u'invalid retry count specified')
    if opts.buffersize is not None:
        numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
        if numeric_buffersize is None:
            parser.error(u'invalid buffer size specified')
        opts.buffersize = numeric_buffersize
    try:
        opts.playliststart = int(opts.playliststart)
        if opts.playliststart <= 0:
            raise ValueError(u'Playlist start must be positive')
    except (TypeError, ValueError) as err:
        parser.error(u'invalid playlist start number specified')
    try:
        opts.playlistend = int(opts.playlistend)
        if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
            raise ValueError(u'Playlist end must be greater than playlist start')
    except (TypeError, ValueError) as err:
        parser.error(u'invalid playlist end number specified')
    if opts.extractaudio:
        if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
            parser.error(u'invalid audio format specified')
    if opts.audioquality:
        opts.audioquality = opts.audioquality.strip('k').strip('K')
        if not opts.audioquality.isdigit():
            parser.error(u'invalid audio quality specified')
    if opts.recodevideo is not None:
        if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg']:
            parser.error(u'invalid video recode format specified')

    if sys.version_info < (3,):
        # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
        if opts.outtmpl is not None:
            opts.outtmpl = opts.outtmpl.decode(preferredencoding())
    outtmpl =((opts.outtmpl is not None and opts.outtmpl)
            or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
            or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
            or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
            or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
            or (opts.useid and u'%(id)s.%(ext)s')
            or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
            or u'%(id)s.%(ext)s')

    # File downloader
    fd = FileDownloader({
        'usenetrc': opts.usenetrc,
        'username': opts.username,
        'password': opts.password,
        'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
        'forceurl': opts.geturl,
        'forcetitle': opts.gettitle,
        'forcethumbnail': opts.getthumbnail,
        'forcedescription': opts.getdescription,
        'forcefilename': opts.getfilename,
        'forceformat': opts.getformat,
        'simulate': opts.simulate,
        'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
        'format': opts.format,
        'format_limit': opts.format_limit,
        'listformats': opts.listformats,
        'outtmpl': outtmpl,
        'autonumber_size': opts.autonumber_size,
        'restrictfilenames': opts.restrictfilenames,
        'ignoreerrors': opts.ignoreerrors,
        'ratelimit': opts.ratelimit,
        'nooverwrites': opts.nooverwrites,
        'retries': opts.retries,
        'buffersize': opts.buffersize,
        'noresizebuffer': opts.noresizebuffer,
        'continuedl': opts.continue_dl,
        'noprogress': opts.noprogress,
        'progress_with_newline': opts.progress_with_newline,
        'playliststart': opts.playliststart,
        'playlistend': opts.playlistend,
        'logtostderr': opts.outtmpl == '-',
        'consoletitle': opts.consoletitle,
        'nopart': opts.nopart,
        'updatetime': opts.updatetime,
        'writedescription': opts.writedescription,
        'writeinfojson': opts.writeinfojson,
        'writesubtitles': opts.writesubtitles,
        'onlysubtitles': opts.onlysubtitles,
        'allsubtitles': opts.allsubtitles,
        'listsubtitles': opts.listsubtitles,
        'subtitlesformat': opts.subtitlesformat,
        'subtitleslang': opts.subtitleslang,
        'matchtitle': decodeOption(opts.matchtitle),
        'rejecttitle': decodeOption(opts.rejecttitle),
        'max_downloads': opts.max_downloads,
        'prefer_free_formats': opts.prefer_free_formats,
        'verbose': opts.verbose,
        'dump_intermediate_pages': opts.dump_intermediate_pages,
        'test': opts.test,
        'keepvideo': opts.keepvideo,
        'min_filesize': opts.min_filesize,
        'max_filesize': opts.max_filesize
        })

    if opts.verbose:
        fd.to_screen(u'[debug] youtube-dl version ' + __version__)
        try:
            sp = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                  cwd=os.path.dirname(os.path.abspath(__file__)))
            out, err = sp.communicate()
            out = out.decode().strip()
            if re.match('[0-9a-f]+', out):
                fd.to_screen(u'[debug] Git HEAD: ' + out)
        except:
            pass
        fd.to_screen(u'[debug] Python version %s - %s' %(platform.python_version(), platform.platform()))
        fd.to_screen(u'[debug] Proxy map: ' + str(proxy_handler.proxies))

    for extractor in extractors:
        fd.add_info_extractor(extractor)

    # PostProcessors
    if opts.extractaudio:
        fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, nopostoverwrites=opts.nopostoverwrites))
    if opts.recodevideo:
        fd.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo))

    # Update version
    if opts.update_self:
        update_self(fd.to_screen, opts.verbose, sys.argv[0])

    # Maybe do nothing
    if len(all_urls) < 1:
        if not opts.update_self:
            parser.error(u'you must provide at least one URL')
        else:
            sys.exit()

    try:
        retcode = fd.download(all_urls)
    except MaxDownloadsReached:
        fd.to_screen(u'--max-download limit reached, aborting.')
        retcode = 101

    # Dump cookie jar if requested
    if opts.cookiefile is not None:
        try:
            jar.save()
        except (IOError, OSError) as err:
            sys.exit(u'ERROR: unable to save cookie jar')

    sys.exit(retcode)

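For illustration only: the outtmpl expression in _real_main() above is a chain of or-clauses in which the first truthy branch wins. _FakeOpts below is a hypothetical stand-in for the parsed options, used just to show the fallback in isolation with a shortened chain.

class _FakeOpts(object):
    outtmpl = None
    format = None
    usetitle = True
    autonumber = False
    useid = False

opts = _FakeOpts()
outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
        or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
        or u'%(id)s.%(ext)s')
assert outtmpl == u'%(title)s-%(id)s.%(ext)s'
print(outtmpl % {'title': u'Some video', 'id': u'abc123', 'ext': u'mp4'})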
def main():
    try:
        _real_main()
    except DownloadError:
        sys.exit(1)
    except SameFileError:
        sys.exit(u'ERROR: fixed output name but more than one file to download')
    except KeyboardInterrupt:
        sys.exit(u'\nERROR: Interrupted by user')
youtube_dl/__main__.py: 18 added lines (new executable file)
@@ -0,0 +1,18 @@
#!/usr/bin/env python

# Execute with
# $ python youtube_dl/__main__.py (2.6+)
# $ python -m youtube_dl          (2.7+)

import sys

if __package__ is None and not hasattr(sys, "frozen"):
    # direct call of __main__.py
    import os.path
    path = os.path.realpath(os.path.abspath(__file__))
    sys.path.append(os.path.dirname(os.path.dirname(path)))

import youtube_dl

if __name__ == '__main__':
    youtube_dl.main()
youtube_dl/update.py: 168 added lines (new file)
@@ -0,0 +1,168 @@
import json
import traceback
import hashlib
from zipimport import zipimporter

from .utils import *
from .version import __version__

def rsa_verify(message, signature, key):
    from struct import pack
    from hashlib import sha256
    from sys import version_info
    def b(x):
        if version_info[0] == 2: return x
        else: return x.encode('latin1')
    assert(type(message) == type(b('')))
    block_size = 0
    n = key[0]
    while n:
        block_size += 1
        n >>= 8
    signature = pow(int(signature, 16), key[1], key[0])
    raw_bytes = []
    while signature:
        raw_bytes.insert(0, pack("B", signature & 0xFF))
        signature >>= 8
    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
    if signature[0:2] != b('\x00\x01'): return False
    signature = signature[2:]
    if not b('\x00') in signature: return False
    signature = signature[signature.index(b('\x00'))+1:]
    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
    signature = signature[19:]
    if signature != sha256(message).digest(): return False
    return True

def update_self(to_screen, verbose, filename):
    """Update the program file with the latest version from the repository"""

    UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
    VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
    JSON_URL = UPDATE_URL + 'versions.json'
    UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)


    if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, "frozen"):
        to_screen(u'It looks like you installed youtube-dl with pip, setup.py or a tarball. Please use that to update.')
        return

    # Check if there is a new version
    try:
        newversion = compat_urllib_request.urlopen(VERSION_URL).read().decode('utf-8').strip()
    except:
        if verbose: to_screen(compat_str(traceback.format_exc()))
        to_screen(u'ERROR: can\'t find the current version. Please try again later.')
        return
    if newversion == __version__:
        to_screen(u'youtube-dl is up-to-date (' + __version__ + ')')
        return

    # Download and check versions info
    try:
        versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
        versions_info = json.loads(versions_info)
    except:
        if verbose: to_screen(compat_str(traceback.format_exc()))
        to_screen(u'ERROR: can\'t obtain versions info. Please try again later.')
        return
    if not 'signature' in versions_info:
        to_screen(u'ERROR: the versions file is not signed or corrupted. Aborting.')
        return
    signature = versions_info['signature']
    del versions_info['signature']
    if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
        to_screen(u'ERROR: the versions file signature is invalid. Aborting.')
        return

    to_screen(u'Updating to version ' + versions_info['latest'] + '...')
    version = versions_info['versions'][versions_info['latest']]

    print_notes(to_screen, versions_info['versions'])

    if not os.access(filename, os.W_OK):
        to_screen(u'ERROR: no write permissions on %s' % filename)
        return

    # Py2EXE
    if hasattr(sys, "frozen"):
        exe = os.path.abspath(filename)
        directory = os.path.dirname(exe)
        if not os.access(directory, os.W_OK):
            to_screen(u'ERROR: no write permissions on %s' % directory)
            return

        try:
            urlh = compat_urllib_request.urlopen(version['exe'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to download latest version')
            return

        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['exe'][1]:
            to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
            return

        try:
            with open(exe + '.new', 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to write the new version')
            return

        try:
            bat = os.path.join(directory, 'youtube-dl-updater.bat')
            b = open(bat, 'w')
            b.write("""
echo Updating youtube-dl...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s"
del "%s"
            \n""" %(exe, exe, bat))
            b.close()

            os.startfile(bat)
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to overwrite current version')
            return

    # Zip unix package
    elif isinstance(globals().get('__loader__'), zipimporter):
        try:
            urlh = compat_urllib_request.urlopen(version['bin'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to download latest version')
            return

        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['bin'][1]:
            to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
            return

        try:
            with open(filename, 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to overwrite current version')
            return

    to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')

def print_notes(to_screen, versions, fromVersion=__version__):
    notes = []
    for v,vdata in sorted(versions.items()):
        if v > fromVersion:
            notes.extend(vdata.get('notes', []))
    if notes:
        to_screen(u'PLEASE NOTE:')
        for note in notes:
            to_screen(note)
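A short aside on print_notes above (the helper now receives the caller's to_screen, since the module defines no global of that name): plain string comparison of the version keys is enough because releases are zero-padded YYYY.MM.DD dates, so lexicographic order matches chronological order. A self-contained check of that ordering logic:

versions = {
    '2013.04.11': {'notes': [u'Older note']},
    '2013.04.22': {'notes': [u'Newer note']},
}
from_version = '2013.04.11'
notes = []
for v, vdata in sorted(versions.items()):
    if v > from_version:
        notes.extend(vdata.get('notes', []))
assert notes == [u'Newer note']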
youtube_dl/utils.py: 570 added lines (new file)
@@ -0,0 +1,570 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import gzip
import io
import json
import locale
import os
import re
import sys
import traceback
import zlib
import email.utils
import json

try:
    import urllib.request as compat_urllib_request
except ImportError: # Python 2
    import urllib2 as compat_urllib_request

try:
    import urllib.error as compat_urllib_error
except ImportError: # Python 2
    import urllib2 as compat_urllib_error

try:
    import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
    import urllib as compat_urllib_parse

try:
    from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
    from urlparse import urlparse as compat_urllib_parse_urlparse

try:
    import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
    import cookielib as compat_cookiejar

try:
    import html.entities as compat_html_entities
except ImportError: # Python 2
    import htmlentitydefs as compat_html_entities

try:
    import html.parser as compat_html_parser
except ImportError: # Python 2
    import HTMLParser as compat_html_parser

try:
    import http.client as compat_http_client
except ImportError: # Python 2
    import httplib as compat_http_client

try:
    from subprocess import DEVNULL
    compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')

try:
    from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
    # Python 2's version is apparently totally broken
    def _unquote(string, encoding='utf-8', errors='replace'):
        if string == '':
            return string
        res = string.split('%')
        if len(res) == 1:
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'replace'
        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
        pct_sequence = b''
        string = res[0]
        for item in res[1:]:
            try:
                if not item:
                    raise ValueError
                pct_sequence += item[:2].decode('hex')
                rest = item[2:]
                if not rest:
                    # This segment was just a single percent-encoded character.
                    # May be part of a sequence of code units, so delay decoding.
                    # (Stored in pct_sequence).
                    continue
            except ValueError:
                rest = '%' + item
            # Encountered non-percent-encoded characters. Flush the current
            # pct_sequence.
            string += pct_sequence.decode(encoding, errors) + rest
            pct_sequence = b''
        if pct_sequence:
            # Flush the final pct_sequence
            string += pct_sequence.decode(encoding, errors)
        return string

    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
                encoding='utf-8', errors='replace'):
        qs, _coerce_result = qs, unicode
        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
        r = []
        for name_value in pairs:
            if not name_value and not strict_parsing:
                continue
            nv = name_value.split('=', 1)
            if len(nv) != 2:
                if strict_parsing:
                    raise ValueError("bad query field: %r" % (name_value,))
                # Handle case of a control-name with no equal sign
                if keep_blank_values:
                    nv.append('')
                else:
                    continue
            if len(nv[1]) or keep_blank_values:
                name = nv[0].replace('+', ' ')
                name = _unquote(name, encoding=encoding, errors=errors)
                name = _coerce_result(name)
                value = nv[1].replace('+', ' ')
                value = _unquote(value, encoding=encoding, errors=errors)
                value = _coerce_result(value)
                r.append((name, value))
        return r

    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
                encoding='utf-8', errors='replace'):
        parsed_result = {}
        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
                        encoding=encoding, errors=errors)
        for name, value in pairs:
            if name in parsed_result:
                parsed_result[name].append(value)
            else:
                parsed_result[name] = [value]
        return parsed_result

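Illustrative usage of the compat_parse_qs shim above (not part of the diff; the import path assumes this checkout is on sys.path): repeated keys accumulate into lists and '+' decodes to a space, matching Python 3's parse_qs behaviour on both interpreters.

from youtube_dl.utils import compat_parse_qs

assert compat_parse_qs('a=1&a=2&b=with+space') == {'a': ['1', '2'], 'b': ['with space']}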
try:
 | 
			
		||||
    compat_str = unicode # Python 2
 | 
			
		||||
except NameError:
 | 
			
		||||
    compat_str = str
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    compat_chr = unichr # Python 2
 | 
			
		||||
except NameError:
 | 
			
		||||
    compat_chr = chr
 | 
			
		||||
 | 
			
		||||
std_headers = {
 | 
			
		||||
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0',
 | 
			
		||||
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
 | 
			
		||||
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
 | 
			
		||||
    'Accept-Encoding': 'gzip, deflate',
 | 
			
		||||
    'Accept-Language': 'en-us,en;q=0.5',
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
def preferredencoding():
 | 
			
		||||
    """Get preferred encoding.
 | 
			
		||||
 | 
			
		||||
    Returns the best encoding scheme for the system, based on
 | 
			
		||||
    locale.getpreferredencoding() and some further tweaks.
 | 
			
		||||
    """
 | 
			
		||||
    try:
 | 
			
		||||
        pref = locale.getpreferredencoding()
 | 
			
		||||
        u'TEST'.encode(pref)
 | 
			
		||||
    except:
 | 
			
		||||
        pref = 'UTF-8'
 | 
			
		||||
 | 
			
		||||
    return pref
 | 
			
		||||
 | 
			
		||||
if sys.version_info < (3,0):
 | 
			
		||||
    def compat_print(s):
 | 
			
		||||
        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
 | 
			
		||||
else:
 | 
			
		||||
    def compat_print(s):
 | 
			
		||||
        assert type(s) == type(u'')
 | 
			
		||||
        print(s)
 | 
			
		||||
 | 
			
		||||
# In Python 2.x, json.dump expects a bytestream.
 | 
			
		||||
# In Python 3.x, it writes to a character stream
 | 
			
		||||
if sys.version_info < (3,0):
 | 
			
		||||
    def write_json_file(obj, fn):
 | 
			
		||||
        with open(fn, 'wb') as f:
 | 
			
		||||
            json.dump(obj, f)
 | 
			
		||||
else:
 | 
			
		||||
    def write_json_file(obj, fn):
 | 
			
		||||
        with open(fn, 'w', encoding='utf-8') as f:
 | 
			
		||||
            json.dump(obj, f)
 | 
			
		||||
 | 
			
		||||
def htmlentity_transform(matchobj):
 | 
			
		||||
    """Transforms an HTML entity to a character.
 | 
			
		||||
 | 
			
		||||
    This function receives a match object and is intended to be used with
 | 
			
		||||
    the re.sub() function.
 | 
			
		||||
    """
 | 
			
		||||
    entity = matchobj.group(1)
 | 
			
		||||
 | 
			
		||||
    # Known non-numeric HTML entity
 | 
			
		||||
    if entity in compat_html_entities.name2codepoint:
 | 
			
		||||
        return compat_chr(compat_html_entities.name2codepoint[entity])
 | 
			
		||||
 | 
			
		||||
    mobj = re.match(u'(?u)#(x?\\d+)', entity)
 | 
			
		||||
    if mobj is not None:
 | 
			
		||||
        numstr = mobj.group(1)
 | 
			
		||||
        if numstr.startswith(u'x'):
 | 
			
		||||
            base = 16
 | 
			
		||||
            numstr = u'0%s' % numstr
 | 
			
		||||
        else:
 | 
			
		||||
            base = 10
 | 
			
		||||
        return compat_chr(int(numstr, base))
 | 
			
		||||
 | 
			
		||||
    # Unknown entity in name, return its literal representation
 | 
			
		||||
    return (u'&%s;' % entity)

compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix
class AttrParser(compat_html_parser.HTMLParser):
    """Modified HTMLParser that isolates a tag with the specified attribute"""
    def __init__(self, attribute, value):
        self.attribute = attribute
        self.value = value
        self.result = None
        self.started = False
        self.depth = {}
        self.html = None
        self.watch_startpos = False
        self.error_count = 0
        compat_html_parser.HTMLParser.__init__(self)

    def error(self, message):
        if self.error_count > 10 or self.started:
            raise compat_html_parser.HTMLParseError(message, self.getpos())
        self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line
        self.error_count += 1
        self.goahead(1)

    def loads(self, html):
        self.html = html
        self.feed(html)
        self.close()

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if self.started:
            self.find_startpos(None)
        if self.attribute in attrs and attrs[self.attribute] == self.value:
            self.result = [tag]
            self.started = True
            self.watch_startpos = True
        if self.started:
            if not tag in self.depth: self.depth[tag] = 0
            self.depth[tag] += 1

    def handle_endtag(self, tag):
        if self.started:
            if tag in self.depth: self.depth[tag] -= 1
            if self.depth[self.result[0]] == 0:
                self.started = False
                self.result.append(self.getpos())

    def find_startpos(self, x):
        """Needed to put the start position of the result (self.result[1])
        after the opening tag with the requested id"""
        if self.watch_startpos:
            self.watch_startpos = False
            self.result.append(self.getpos())
    handle_entityref = handle_charref = handle_data = handle_comment = \
    handle_decl = handle_pi = unknown_decl = find_startpos

    def get_result(self):
        if self.result is None:
            return None
        if len(self.result) != 3:
            return None
        lines = self.html.split('\n')
        lines = lines[self.result[1][0]-1:self.result[2][0]]
        lines[0] = lines[0][self.result[1][1]:]
        if len(lines) == 1:
            lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
        lines[-1] = lines[-1][:self.result[2][1]]
        return '\n'.join(lines).strip()
# Hack for https://github.com/rg3/youtube-dl/issues/662
if sys.version_info < (2, 7, 3):
    AttrParser.parse_endtag = (lambda self, i:
        i + len("</scr'+'ipt>")
        if self.rawdata[i:].startswith("</scr'+'ipt>")
        else compat_html_parser.HTMLParser.parse_endtag(self, i))

def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute("id", id, html)

def get_element_by_attribute(attribute, value, html):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    parser = AttrParser(attribute, value)
    try:
        parser.loads(html)
    except compat_html_parser.HTMLParseError:
        pass
    return parser.get_result()
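
A minimal sketch of the two lookup helpers (the HTML snippet is made up):

    page = u'<html><body><div id="player" class="video">Hello</div></body></html>'
    get_element_by_id(u'player', page)                 # -> u'Hello'
    get_element_by_attribute('class', 'video', page)   # -> u'Hello'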


def clean_html(html):
    """Clean an HTML snippet into a readable string"""
    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()
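
A minimal sketch of clean_html on a made-up snippet:

    clean_html(u'<p>First line<br/>Second &amp; last</p>')  # -> u'First line\nSecond & last'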


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == u'-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        # In case of error, try to remove win32 forbidden chars
        filename = re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', filename)

        # An exception here should be caught in the caller
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
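
A minimal sketch of the special '-' case above (no file is created; any other name falls through to open(), retrying once with win32-forbidden characters replaced by '#'):

    stream, name = sanitize_open(u'-', 'wb')   # stream is sys.stdout, or its binary buffer where one exists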


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp
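
A minimal sketch of timeconvert (the date is a made-up RFC 2822 string; anything parsedate_tz cannot handle yields None):

    timeconvert(u'Wed, 14 Nov 2012 10:00:00 +0000')   # -> 1352887200
    timeconvert(u'not a date')                        # -> None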

def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
    """
    def replace_insane(char):
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    result = u''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if not result:
            result = '_'
    return result
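
A minimal sketch of the two sanitize_filename modes on a made-up title:

    sanitize_filename(u'Movie: Part 1?')                   # -> u'Movie - Part 1'
    sanitize_filename(u'Movie: Part 1?', restricted=True)  # -> u'Movie_-_Part_1'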

def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res
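
A minimal sketch of orderedSet (order of first occurrence is preserved):

    orderedSet([u'a', u'b', u'a', u'c', u'b'])   # -> [u'a', u'b', u'c']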

def unescapeHTML(s):
    """
    @param s a string
    """
    assert type(s) == type(u'')

    result = re.sub(u'(?u)&(.+?);', htmlentity_transform, s)
    return result

def encodeFilename(s):
    """
    @param s The name of the file
    """

    assert type(s) == type(u'')

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # Pass u'' directly to use Unicode APIs on Windows 2000 and up
        # (Detecting Windows NT 4 is tricky because 'major >= 4' would
        # match Windows 9x series as well. Besides, NT 4 is obsolete.)
        return s
    else:
        encoding = sys.getfilesystemencoding()
        if encoding is None:
            encoding = 'utf-8'
        return s.encode(encoding, 'ignore')

def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval

class ExtractorError(Exception):
    """Error during info extraction."""
    def __init__(self, msg, tb=None):
        """ tb, if given, is the original traceback (so that it can be printed out). """
        super(ExtractorError, self).__init__(msg)
        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception

    def format_traceback(self):
        if self.traceback is None:
            return None
        return u''.join(traceback.format_tb(self.traceback))


class DownloadError(Exception):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """
    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class SameFileError(Exception):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass


class PostProcessingError(Exception):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """
    def __init__(self, msg):
        self.msg = msg

class MaxDownloadsReached(Exception):
    """ --max-downloads limit has been reached. """
    pass


class UnavailableVideoError(Exception):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass


class ContentTooShortError(Exception):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """
    # Both in bytes
    downloaded = None
    expected = None

    def __init__(self, downloaded, expected):
        self.downloaded = downloaded
        self.expected = expected

class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-No-Compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    @staticmethod
    def deflate(data):
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        for h,v in std_headers.items():
            if h in req.headers:
                del req.headers[h]
            req.add_header(h, v)
        if 'Youtubedl-no-compression' in req.headers:
            if 'Accept-encoding' in req.headers:
                del req.headers['Accept-encoding']
            del req.headers['Youtubedl-no-compression']
        if 'Youtubedl-user-agent' in req.headers:
            if 'User-agent' in req.headers:
                del req.headers['User-agent']
            req.headers['User-agent'] = req.headers['Youtubedl-user-agent']
            del req.headers['Youtubedl-user-agent']
        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            gz = gzip.GzipFile(fileobj=io.BytesIO(resp.read()), mode='r')
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        return resp

    https_request = http_request
    https_response = http_response
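
A minimal sketch, based on the class docstring, of installing the handler and opting one request out of compression (example.com is a placeholder; std_headers is the header dict referenced in http_request() above):

    opener = compat_urllib_request.build_opener(YoutubeDLHandler())
    req = compat_urllib_request.Request(u'http://example.com/video')
    req.add_header('Youtubedl-No-Compression', '1')  # removed again by http_request()
    # opener.open(req) would then send std_headers without an Accept-encoding header.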

youtube_dl/version.py (new file, 2 lines)
@@ -0,0 +1,2 @@

__version__ = '2013.04.22'