Rename util.u to util.compat_str
@@ -332,7 +332,7 @@ class FileDownloader(object):
			template_dict['autonumber'] = u'%05d' % self._num_downloads

			template_dict = dict((key, u'NA' if val is None else val) for key, val in template_dict.items())
-			template_dict = dict((k, sanitize_filename(u(v), self.params.get('restrictfilenames'))) for k,v in template_dict.items())
+			template_dict = dict((k, sanitize_filename(compat_str(v), self.params.get('restrictfilenames'))) for k,v in template_dict.items())

			filename = self.params['outtmpl'] % template_dict
			return filename
@@ -403,7 +403,7 @@ class FileDownloader(object):
			if dn != '' and not os.path.exists(dn): # dn is already encoded
				os.makedirs(dn)
		except (OSError, IOError), err:
-			self.trouble(u'ERROR: unable to create directory ' + u(err))
+			self.trouble(u'ERROR: unable to create directory ' + compat_str(err))
			return

		if self.params.get('writedescription', False):

@@ -253,7 +253,7 @@ class YoutubeIE(InfoExtractor):
				else:
					raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
			except (IOError, netrc.NetrcParseError), err:
-				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % u(err))
+				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
				return

		# Set language
@@ -262,7 +262,7 @@ class YoutubeIE(InfoExtractor):
			self.report_lang()
			urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.to_stderr(u'WARNING: unable to set language: %s' % u(err))
+			self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err))
			return

		# No authentication to be performed
@@ -285,7 +285,7 @@ class YoutubeIE(InfoExtractor):
				self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
				return
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % u(err))
+			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
			return

		# Confirm age
@@ -298,7 +298,7 @@ class YoutubeIE(InfoExtractor):
			self.report_age_confirmation()
			age_results = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
			return

	def _real_extract(self, url):
@@ -320,7 +320,7 @@ class YoutubeIE(InfoExtractor):
		try:
			video_webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
			return

		# Attempt to extract SWF player URL
@@ -342,7 +342,7 @@ class YoutubeIE(InfoExtractor):
				if 'token' in video_info:
					break
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % u(err))
+				self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
				return
		if 'token' not in video_info:
			if 'reason' in video_info:
@@ -405,7 +405,7 @@ class YoutubeIE(InfoExtractor):
				try:
					srt_list = urllib2.urlopen(request).read()
				except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-					raise Trouble(u'WARNING: unable to download video subtitles: %s' % u(err))
+					raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
				srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
				srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
				if not srt_lang_list:
@@ -422,7 +422,7 @@ class YoutubeIE(InfoExtractor):
				try:
					srt_xml = urllib2.urlopen(request).read()
				except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-					raise Trouble(u'WARNING: unable to download video subtitles: %s' % u(err))
+					raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
				if not srt_xml:
					raise Trouble(u'WARNING: unable to download video subtitles')
				video_subtitles = self._closed_captions_xml_to_srt(srt_xml.decode('utf-8'))
@@ -544,7 +544,7 @@ class MetacafeIE(InfoExtractor):
			self.report_disclaimer()
			disclaimer = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err))
			return

		# Confirm age
@@ -557,7 +557,7 @@ class MetacafeIE(InfoExtractor):
			self.report_age_confirmation()
			disclaimer = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
			return

	def _real_extract(self, url):
@@ -581,7 +581,7 @@ class MetacafeIE(InfoExtractor):
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
			return

		# Extract URL, uploader and title from webpage
@@ -672,7 +672,7 @@ class DailymotionIE(InfoExtractor):
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
			return

		# Extract URL, uploader and title from webpage
@@ -768,7 +768,7 @@ class GoogleIE(InfoExtractor):
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
			return

		# Extract URL, uploader, and title from webpage
@@ -807,7 +807,7 @@ class GoogleIE(InfoExtractor):
			try:
				webpage = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
+				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
				return
			mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
			if mobj is None:
@@ -861,7 +861,7 @@ class PhotobucketIE(InfoExtractor):
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
			return

		# Extract URL, uploader, and title from webpage
@@ -929,7 +929,7 @@ class YahooIE(InfoExtractor):
			try:
				webpage = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
+				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
				return

			mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
@@ -953,7 +953,7 @@ class YahooIE(InfoExtractor):
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
			return

		# Extract uploader and title from webpage
@@ -1011,7 +1011,7 @@ class YahooIE(InfoExtractor):
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
			return

		# Extract media URL from playlist XML
@@ -1067,7 +1067,7 @@ class VimeoIE(InfoExtractor):
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
			return

		# Now we begin extracting as much information as we can from what we
@@ -1173,7 +1173,7 @@ class ArteTvIE(InfoExtractor):
			self.report_download_webpage(url)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
			return
		except ValueError, err:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
@@ -1369,7 +1369,7 @@ class GenericIE(InfoExtractor):
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
			return
		except ValueError, err:
			# since this is the last-resort InfoExtractor, if
@@ -1488,7 +1488,7 @@ class YoutubeSearchIE(InfoExtractor):
			try:
				data = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download API page: %s' % u(err))
+				self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
				return
			api_response = json.loads(data)['data']

@@ -1565,7 +1565,7 @@ class GoogleSearchIE(InfoExtractor):
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
				return

			# Extract video identifiers
@@ -1648,7 +1648,7 @@ class YahooSearchIE(InfoExtractor):
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
				return

			# Extract video identifiers
@@ -1718,7 +1718,7 @@ class YoutubePlaylistIE(InfoExtractor):
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
				return

			# Extract video identifiers
@@ -1775,7 +1775,7 @@ class YoutubeChannelIE(InfoExtractor):
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
				return

			# Extract video identifiers
@@ -1838,7 +1838,7 @@ class YoutubeUserIE(InfoExtractor):
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
				return

			# Extract video identifiers
@@ -1910,7 +1910,7 @@ class BlipTVUserIE(InfoExtractor):
			mobj = re.search(r'data-users-id="([^"]+)"', page)
			page_base = page_base % mobj.group(1)
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
			return


@@ -1998,7 +1998,7 @@ class DepositFilesIE(InfoExtractor):
			self.report_download_webpage(file_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err))
			return

		# Search for the real file URL
@@ -2114,7 +2114,7 @@ class FacebookIE(InfoExtractor):
				else:
					raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
			except (IOError, netrc.NetrcParseError), err:
-				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % u(err))
+				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
				return

		if useremail is None:
@@ -2134,7 +2134,7 @@ class FacebookIE(InfoExtractor):
				self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
				return
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % u(err))
+			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
			return

	def _real_extract(self, url):
@@ -2151,7 +2151,7 @@ class FacebookIE(InfoExtractor):
			page = urllib2.urlopen(request)
			video_webpage = page.read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
			return

		# Start extracting information
@@ -2286,13 +2286,13 @@ class BlipTVIE(InfoExtractor):
					'urlhandle': urlh
				}
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
			return
		if info is None: # Regular URL
			try:
				json_code = urlh.read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % u(err))
+				self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
				return

			try:
@@ -2360,7 +2360,7 @@ class MyVideoIE(InfoExtractor):
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
			return

		self.report_extraction(video_id)
@@ -2457,7 +2457,7 @@ class ComedyCentralIE(InfoExtractor):
			htmlHandle = urllib2.urlopen(req)
			html = htmlHandle.read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
			return
		if dlNewest:
			url = htmlHandle.geturl()
@@ -2490,7 +2490,7 @@ class ComedyCentralIE(InfoExtractor):
			urlHandle = urllib2.urlopen(playerUrl_raw)
			playerUrl = urlHandle.geturl()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to find out player URL: ' + u(err))
+			self._downloader.trouble(u'ERROR: unable to find out player URL: ' + compat_str(err))
			return

		uri = mMovieParams[0][1]
@@ -2499,7 +2499,7 @@ class ComedyCentralIE(InfoExtractor):
		try:
			indexXml = urllib2.urlopen(indexUrl).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download episode index: ' + u(err))
+			self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err))
			return

		results = []
@@ -2520,7 +2520,7 @@ class ComedyCentralIE(InfoExtractor):
			try:
				configXml = urllib2.urlopen(configReq).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
				return

			cdoc = xml.etree.ElementTree.fromstring(configXml)
@@ -2603,7 +2603,7 @@ class EscapistIE(InfoExtractor):
			m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
			webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download webpage: ' + u(err))
+			self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err))
			return

		descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
@@ -2619,7 +2619,7 @@ class EscapistIE(InfoExtractor):
		try:
			configJSON = urllib2.urlopen(configUrl).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download configuration: ' + u(err))
+			self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err))
			return

		# Technically, it's JavaScript, not JSON
@@ -2628,7 +2628,7 @@ class EscapistIE(InfoExtractor):
		try:
			config = json.loads(configJSON)
		except (ValueError,), err:
-			self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + u(err))
+			self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err))
			return

		playlist = config['playlist']
@@ -2675,7 +2675,7 @@ class CollegeHumorIE(InfoExtractor):
		try:
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
			return

		m = re.search(r'id="video:(?P<internalvideoid>[0-9]+)"', webpage)
@@ -2696,7 +2696,7 @@ class CollegeHumorIE(InfoExtractor):
		try:
			metaXml = urllib2.urlopen(xmlUrl).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
			return

		mdoc = xml.etree.ElementTree.fromstring(metaXml)
@@ -2741,7 +2741,7 @@ class XVideosIE(InfoExtractor):
		try:
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
			return

		self.report_extraction(video_id)
@@ -2825,7 +2825,7 @@ class SoundcloudIE(InfoExtractor):
		try:
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
			return

		self.report_extraction('%s/%s' % (uploader, slug_title))
@@ -2860,7 +2860,7 @@ class SoundcloudIE(InfoExtractor):
			try:
				upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d')
			except Exception, e:
-				self._downloader.to_stderr(u(e))
+				self._downloader.to_stderr(compat_str(e))

		# for soundcloud, a request to a cross domain is required for cookies
		request = urllib2.Request('http://media.soundcloud.com/crossdomain.xml', std_headers)
@@ -2902,7 +2902,7 @@ class InfoQIE(InfoExtractor):
		try:
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
			return

		self.report_extraction(url)
@@ -3014,7 +3014,7 @@ class MixcloudIE(InfoExtractor):
			self.report_download_json(file_url)
			jsonData = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % u(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err))
			return

		# parse JSON
@@ -3093,7 +3093,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
			try:
				metaXml = urllib2.urlopen(xmlUrl).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % u(err))
+				self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
				return
			mdoc = xml.etree.ElementTree.fromstring(metaXml)
			try:
@@ -3117,7 +3117,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
			try:
				coursepage = urllib2.urlopen(url).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download course info page: ' + u(err))
+				self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
				return

			m = re.search('<h1>([^<]+)</h1>', coursepage)
@@ -3156,7 +3156,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
			try:
				rootpage = urllib2.urlopen(rootURL).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download course info page: ' + u(err))
+				self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
				return

			info['title'] = info['id']
@@ -3203,7 +3203,7 @@ class MTVIE(InfoExtractor):
		try:
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
			return

		mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
@@ -3236,7 +3236,7 @@ class MTVIE(InfoExtractor):
		try:
			metadataXml = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % u(err))
+			self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err))
			return

		mdoc = xml.etree.ElementTree.fromstring(metadataXml)
@@ -3324,7 +3324,7 @@ class YoukuIE(InfoExtractor):
			self.report_download_webpage(video_id)
			jsondata = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
			return

		self.report_extraction(video_id)
@@ -3498,7 +3498,7 @@ class GooglePlusIE(InfoExtractor):
		try:
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err))
			return

		# Extract update date
@@ -3540,7 +3540,7 @@ class GooglePlusIE(InfoExtractor):
		try:
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
			return
		self.report_extract_vid_page(video_page)

@@ -27,9 +27,9 @@ std_headers = {
}

try:
-	u = unicode # Python 2
+	compat_str = unicode # Python 2
except NameError:
-	u = str
+	compat_str = str

def preferredencoding():
	"""Get preferred encoding.

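For reference, a minimal standalone sketch of the shim renamed above, as it behaves on both Python 2 and Python 3; the error object and message in the usage lines are illustrative only, not code from this commit:

	# compat_str is the native text type: unicode on Python 2, str on Python 3.
	try:
		compat_str = unicode  # Python 2
	except NameError:         # 'unicode' does not exist on Python 3
		compat_str = str

	# Example mirroring the call sites changed in this commit (hypothetical error):
	err = IOError('No such file or directory')
	print(u'ERROR: unable to create directory ' + compat_str(err))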
Philipp Hagemeister