Compare commits
19 Commits
2014.08.24 ... 2014.08.24
SHA1
b8313f07bc
92a17d28ac
5f90042bd6
9480d1a566
36b0079f23
28028629b9
11f75cac3d
e673db0194
ebab4520ff
a71d1414eb
423817c468
51ed9fce09
d43aeb1d00
4d805e063c
165250ff5e
83317f6938
8c778adc39
71b6065009
c065fd35ae
README.md
@@ -255,6 +255,7 @@ which means you can modify it, redistribute it or use it however you like.
 ## Authentication Options:
     -u, --username USERNAME          account username
     -p, --password PASSWORD          account password
+    -2, --twofactor TWOFACTOR        two-factor auth code
     -n, --netrc                      use .netrc authentication data
     --video-password PASSWORD        video password (vimeo, smotri)
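With the two-factor option in place, the code can be passed straight on the command line together with the account credentials; an illustrative invocation (username, password, code and URL are placeholders):

    youtube-dl -u user@example.com -p PASSWORD -2 123456 URL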
test/test_playlists.py
@@ -62,6 +62,7 @@ from youtube_dl.extractor import (
     InstagramUserIE,
     CSpanIE,
     AolIE,
+    GameOnePlaylistIE,
 )

@@ -407,5 +408,6 @@ class TestPlaylists(unittest.TestCase):
        self.assertEqual(result['id'], 'rbhagwati2')
        assertGreaterEqual(self, len(result['entries']), 179)


if __name__ == '__main__':
    unittest.main()
youtube_dl/__init__.py
@@ -72,6 +72,7 @@ __authors__ = (
    'Alexander Kirk',
    'Erik Johnson',
    'Keith Beckman',
    'Ole Ernst',
)

__license__ = 'Public Domain'

@@ -317,6 +318,8 @@ def parseOpts(overrideArguments=None):
         dest='username', metavar='USERNAME', help='account username')
     authentication.add_option('-p', '--password',
         dest='password', metavar='PASSWORD', help='account password')
+    authentication.add_option('-2', '--twofactor',
+        dest='twofactor', metavar='TWOFACTOR', help='two-factor auth code')
     authentication.add_option('-n', '--netrc',
         action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
     authentication.add_option('--video-password',

@@ -751,6 +754,7 @@ def _real_main(argv=None):
         'usenetrc': opts.usenetrc,
         'username': opts.username,
         'password': opts.password,
+        'twofactor': opts.twofactor,
         'videopassword': opts.videopassword,
         'quiet': (opts.quiet or any_printing),
         'no_warnings': opts.no_warnings,
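The same credentials end up as the params keys that _real_main builds above, so they can also be supplied when embedding youtube-dl as a library; a minimal sketch (account values and video URL are placeholders, not taken from the repository):

    import youtube_dl

    ydl_opts = {
        'username': 'user@example.com',   # placeholder
        'password': 'hunter2',            # placeholder
        'twofactor': '123456',            # TOTP code; this key is the one added in this change set
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])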
youtube_dl/extractor/__init__.py
@@ -88,6 +88,7 @@ from .engadget import EngadgetIE
 from .escapist import EscapistIE
 from .everyonesmixtape import EveryonesMixtapeIE
 from .exfm import ExfmIE
+from .expotv import ExpoTVIE
 from .extremetube import ExtremeTubeIE
 from .facebook import FacebookIE
 from .faz import FazIE

@@ -115,7 +116,10 @@ from .freesound import FreesoundIE
 from .freespeech import FreespeechIE
 from .funnyordie import FunnyOrDieIE
 from .gamekings import GamekingsIE
-from .gameone import GameOneIE
+from .gameone import (
+    GameOneIE,
+    GameOnePlaylistIE,
+)
 from .gamespot import GameSpotIE
 from .gamestar import GameStarIE
 from .gametrailers import GametrailersIE
youtube_dl/extractor/common.py
@@ -440,6 +440,22 @@ class InfoExtractor(object):
         return (username, password)
+
+    def _get_tfa_info(self):
+        """
+        Get the two-factor authentication info
+        TODO - asking the user will be required for sms/phone verify
+        currently just uses the command line option
+        If there's no info available, return None
+        """
+        if self._downloader is None:
+            return None
+        downloader_params = self._downloader.params
+
+        if downloader_params.get('twofactor', None) is not None:
+            return downloader_params['twofactor']
+
+        return None
 
     # Helper functions for extracting OpenGraph info
     @staticmethod
     def _og_regexes(prop):
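A minimal sketch of how an extractor's login routine might consume this helper; the method below is hypothetical and only illustrates the intended call pattern, the real consumer added in this change set is YoutubeBaseInfoExtractor._login further down:

    # Hypothetical extractor method, for illustration only.
    def _login(self):
        username, password = self._get_login_info()
        if username is None:
            return True  # nothing to log in with, skip silently
        tfa_code = self._get_tfa_info()  # None unless --twofactor was given
        if tfa_code is None:
            self.report_warning('Two-factor code required; supply it with --twofactor')
            return False
        # ... submit username, password and tfa_code to the site's login form ...
        return True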
youtube_dl/extractor/expotv.py (new file, 73 lines)
@@ -0,0 +1,73 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    unified_strdate,
)


class ExpoTVIE(InfoExtractor):
    _VALID_URL = r'https?://www\.expotv\.com/videos/[^?#]*/(?P<id>[0-9]+)($|[?#])'
    _TEST = {
        'url': 'http://www.expotv.com/videos/reviews/1/24/LinneCardscom/17561',
        'md5': '2985e6d7a392b2f7a05e0ca350fe41d0',
        'info_dict': {
            'id': '17561',
            'ext': 'mp4',
            'upload_date': '20060212',
            'title': 'My Favorite Online Scrapbook Store',
            'view_count': int,
            'description': 'You\'ll find most everything you need at this virtual store front.',
            'uploader': 'Anna T.',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)
        player_key = self._search_regex(
            r'<param name="playerKey" value="([^"]+)"', webpage, 'player key')
        config_url = 'http://client.expotv.com/video/config/%s/%s' % (
            video_id, player_key)
        config = self._download_json(
            config_url, video_id,
            note='Downloading video configuration')

        formats = [{
            'url': fcfg['file'],
            'height': int_or_none(fcfg.get('height')),
            'format_note': fcfg.get('label'),
            'ext': self._search_regex(
                r'filename=.*\.([a-z0-9_A-Z]+)&', fcfg['file'],
                'file extension', default=None),
        } for fcfg in config['sources']]
        self._sort_formats(formats)

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = config.get('image')
        view_count = int_or_none(self._search_regex(
            r'<h5>Plays: ([0-9]+)</h5>', webpage, 'view counts'))
        uploader = self._search_regex(
            r'<div class="reviewer">\s*<img alt="([^"]+)"', webpage, 'uploader',
            fatal=False)
        upload_date = unified_strdate(self._search_regex(
            r'<h5>Reviewed on ([0-9/.]+)</h5>', webpage, 'upload date',
            fatal=False))

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': description,
            'view_count': view_count,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'upload_date': upload_date,
        }
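Assuming the site still serves this page, the new extractor can be exercised through the embedding API with the test URL above (metadata only, nothing is downloaded):

    import youtube_dl

    url = 'http://www.expotv.com/videos/reviews/1/24/LinneCardscom/17561'
    with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
        info = ydl.extract_info(url, download=False)
    print(info['id'], info['upload_date'], info['title'])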
youtube_dl/extractor/gameone.py
@@ -88,3 +88,28 @@ class GameOneIE(InfoExtractor):
             'age_limit': age_limit,
             'timestamp': timestamp,
         }
+
+
+class GameOnePlaylistIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?gameone\.de(?:/tv)?/?$'
+    IE_NAME = 'gameone:playlist'
+    _TEST = {
+        'url': 'http://www.gameone.de/tv',
+        'info_dict': {
+            'title': 'GameOne',
+        },
+        'playlist_mincount': 294,
+    }
+
+    def _real_extract(self, url):
+        webpage = self._download_webpage('http://www.gameone.de/tv', 'TV')
+        max_id = max(map(int, re.findall(r'<a href="/tv/(\d+)"', webpage)))
+        entries = [
+            self.url_result('http://www.gameone.de/tv/%d' % video_id, 'GameOne')
+            for video_id in range(max_id, 0, -1)]
+
+        return {
+            '_type': 'playlist',
+            'title': 'GameOne',
+            'entries': entries,
+        }
youtube_dl/extractor/generic.py
@@ -8,9 +8,7 @@ import re
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
    compat_xml_parse_error,

@@ -331,6 +329,18 @@ class GenericIE(InfoExtractor):
             'info_dict': {
                 'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
             }
         },
+        # Flowplayer
+        {
+            'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
+            'md5': '9d65602bf31c6e20014319c7d07fba27',
+            'info_dict': {
+                'id': '5123ea6d5e5a7',
+                'ext': 'mp4',
+                'age_limit': 18,
+                'uploader': 'www.handjobhub.com',
+                'title': 'Busty Blonde Siri Tit Fuck While Wank at Handjob Hub',
+            }
+        }
     ]

@@ -344,58 +354,6 @@ class GenericIE(InfoExtractor):
         """Report information extraction."""
         self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
 
-    def _send_head(self, url):
-        """Check if it is a redirect, like url shorteners, in case return the new url."""
-
-        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
-            """
-            Subclass the HTTPRedirectHandler to make it use our
-            HEADRequest also on the redirected URL
-            """
-            def redirect_request(self, req, fp, code, msg, headers, newurl):
-                if code in (301, 302, 303, 307):
-                    newurl = newurl.replace(' ', '%20')
-                    newheaders = dict((k,v) for k,v in req.headers.items()
-                                      if k.lower() not in ("content-length", "content-type"))
-                    try:
-                        # This function was deprecated in python 3.3 and removed in 3.4
-                        origin_req_host = req.get_origin_req_host()
-                    except AttributeError:
-                        origin_req_host = req.origin_req_host
-                    return HEADRequest(newurl,
-                                       headers=newheaders,
-                                       origin_req_host=origin_req_host,
-                                       unverifiable=True)
-                else:
-                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)
-
-        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
-            """
-            Fallback to GET if HEAD is not allowed (405 HTTP error)
-            """
-            def http_error_405(self, req, fp, code, msg, headers):
-                fp.read()
-                fp.close()
-
-                newheaders = dict((k,v) for k,v in req.headers.items()
-                                  if k.lower() not in ("content-length", "content-type"))
-                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
-                                                                      headers=newheaders,
-                                                                      origin_req_host=req.get_origin_req_host(),
-                                                                      unverifiable=True))
-
-        # Build our opener
-        opener = compat_urllib_request.OpenerDirector()
-        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
-                        HTTPMethodFallback, HEADRedirectHandler,
-                        compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
-            opener.add_handler(handler())
-
-        response = opener.open(HEADRequest(url))
-        if response is None:
-            raise ExtractorError('Invalid URL protocol')
-        return response
-
     def _extract_rss(self, url, video_id, doc):
         playlist_title = doc.find('./channel/title').text
         playlist_desc_el = doc.find('./channel/description')

@@ -499,9 +457,13 @@ class GenericIE(InfoExtractor):
         self.to_screen('%s: Requesting header' % video_id)
 
-        try:
-            response = self._send_head(url)
+        head_req = HEADRequest(url)
+        response = self._request_webpage(
+            head_req, video_id,
+            note=False, errnote='Could not send HEAD request to %s' % url,
+            fatal=False)
 
+        if response is not False:
             # Check for redirect
             new_url = response.geturl()
             if url != new_url:

@@ -529,10 +491,6 @@ class GenericIE(InfoExtractor):
                     'upload_date': upload_date,
                 }
 
-        except compat_urllib_error.HTTPError:
-            # This may be a stupid server that doesn't like HEAD, our UA, or so
-            pass
-
         try:
             webpage = self._download_webpage(url, video_id)
         except ValueError:

@@ -570,6 +528,16 @@ class GenericIE(InfoExtractor):
             r'(?s)<title>(.*?)</title>', webpage, 'video title',
             default='video')
 
+        # Try to detect age limit automatically
+        age_limit = self._rta_search(webpage)
+        # And then there are the jokers who advertise that they use RTA,
+        # but actually don't.
+        AGE_LIMIT_MARKERS = [
+            r'Proudly Labeled <a href="http://www.rtalabel.org/" title="Restricted to Adults">RTA</a>',
+        ]
+        if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
+            age_limit = 18
+
         # video uploader is domain name
         video_uploader = self._search_regex(
             r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')

@@ -833,6 +801,15 @@ class GenericIE(InfoExtractor):
         if not found:
             # Broaden the findall a little bit: JWPlayer JS loader
             found = re.findall(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)
+        if not found:
+            # Flow player
+            found = re.findall(r'''(?xs)
+                flowplayer\("[^"]+",\s*
+                    \{[^}]+?\}\s*,
+                \s*{[^}]+? ["']?clip["']?\s*:\s*\{\s*
+                    ["']?url["']?\s*:\s*["']([^"']+)["']
+            ''', webpage)
+            assert found
         if not found:
             # Try to find twitter cards info
             found = re.findall(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)

@@ -884,6 +861,7 @@ class GenericIE(InfoExtractor):
                 'url': video_url,
                 'uploader': video_uploader,
                 'title': video_title,
+                'age_limit': age_limit,
             })
 
         if len(entries) == 1:
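The Flowplayer pattern added above is self-contained enough to try on its own; a quick check against a made-up embed snippet (the HTML/JS fragment is invented for illustration, only the regular expression comes from the change):

    import re

    FLOWPLAYER_RE = r'''(?xs)
        flowplayer\("[^"]+",\s*
            \{[^}]+?\}\s*,
        \s*{[^}]+? ["']?clip["']?\s*:\s*\{\s*
            ["']?url["']?\s*:\s*["']([^"']+)["']
    '''

    sample = 'flowplayer("player", {ratio: 0.75}, {autoPlay: false, clip: {url: "http://example.com/video.mp4"}});'
    print(re.findall(FLOWPLAYER_RE, sample))  # ['http://example.com/video.mp4']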
youtube_dl/extractor/pornotube.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor

@@ -9,15 +11,16 @@ from ..utils import (
 
 
 class PornotubeIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
+    _VALID_URL = r'https?://(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
     _TEST = {
-        u'url': u'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing',
-        u'file': u'1689755.flv',
-        u'md5': u'374dd6dcedd24234453b295209aa69b6',
-        u'info_dict': {
-            u"upload_date": u"20090708",
-            u"title": u"Marilyn-Monroe-Bathing",
-            u"age_limit": 18
+        'url': 'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing',
+        'md5': '374dd6dcedd24234453b295209aa69b6',
+        'info_dict': {
+            'id': '1689755',
+            'ext': 'flv',
+            'upload_date': '20090708',
+            'title': 'Marilyn-Monroe-Bathing',
+            'age_limit': 18
         }
     }

@@ -32,22 +35,22 @@ class PornotubeIE(InfoExtractor):
 
         # Get the video URL
         VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
-        video_url = self._search_regex(VIDEO_URL_RE, webpage, u'video url')
+        video_url = self._search_regex(VIDEO_URL_RE, webpage, 'video url')
         video_url = compat_urllib_parse.unquote(video_url)
 
         #Get the uploaded date
         VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
-        upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, u'upload date', fatal=False)
-        if upload_date: upload_date = unified_strdate(upload_date)
+        upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, 'upload date', fatal=False)
+        if upload_date:
+            upload_date = unified_strdate(upload_date)
         age_limit = self._rta_search(webpage)
 
-        info = {'id': video_id,
-                'url': video_url,
-                'uploader': None,
-                'upload_date': upload_date,
-                'title': video_title,
-                'ext': 'flv',
-                'format': 'flv',
-                'age_limit': age_limit}
-
-        return [info]
+        return {
+            'id': video_id,
+            'url': video_url,
+            'upload_date': upload_date,
+            'title': video_title,
+            'ext': 'flv',
+            'format': 'flv',
+            'age_limit': age_limit,
+        }
youtube_dl/extractor/wat.py
@@ -47,7 +47,8 @@ class WatIE(InfoExtractor):
         video_info = self.download_video_info(real_id)
 
         if video_info.get('geolock'):
-            raise ExtractorError('This content is not available in your area', expected=True)
+            self.report_warning(
+                'This content is marked as not available in your area. Trying anyway ..')
 
         chapters = video_info['chapters']
         first_chapter = chapters[0]
youtube_dl/extractor/youtube.py
@@ -37,6 +37,7 @@ from ..utils import (
 class YoutubeBaseInfoExtractor(InfoExtractor):
     """Provide base functions for Youtube extractors"""
     _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
+    _TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor'
     _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
     _AGE_URL = 'https://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
     _NETRC_MACHINE = 'youtube'

@@ -50,12 +51,19 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             fatal=False))
 
     def _login(self):
+        """
+        Attempt to log in to YouTube.
+        True is returned if successful or skipped.
+        False is returned if login failed.
+
+        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
+        """
         (username, password) = self._get_login_info()
         # No authentication to be performed
         if username is None:
             if self._LOGIN_REQUIRED:
                 raise ExtractorError(u'No login info available, needed for using %s.' % self.IE_NAME, expected=True)
-            return False
+            return True
 
         login_page = self._download_webpage(
             self._LOGIN_URL, None,

@@ -73,6 +81,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
                u'Email': username,
                u'GALX': galx,
                u'Passwd': password,

                u'PersistentCookie': u'yes',
                u'_utf8': u'霱',
                u'bgresponse': u'js_disabled',

@@ -88,6 +97,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
                u'uilel': u'3',
                u'hl': u'en_US',
        }

        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # chokes on unicode
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())

@@ -99,6 +109,68 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
            note=u'Logging in', errnote=u'unable to log in', fatal=False)
        if login_results is False:
            return False

        if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
            raise ExtractorError(u'Please use your account password and a two-factor code instead of an application-specific password.', expected=True)

        # Two-Factor
        # TODO add SMS and phone call support - these require making a request and then prompting the user

        if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', login_results) is not None:
            tfa_code = self._get_tfa_info()

            if tfa_code is None:
                self._downloader.report_warning(u'Two-factor authentication required. Provide it with --twofactor <code>')
                self._downloader.report_warning(u'(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                return False

            # Unlike the first login form, secTok and timeStmp are both required for the TFA form

            match = re.search(r'id="secTok"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
            if match is None:
                self._downloader.report_warning(u'Failed to get secTok - did the page structure change?')
            secTok = match.group(1)
            match = re.search(r'id="timeStmp"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
            if match is None:
                self._downloader.report_warning(u'Failed to get timeStmp - did the page structure change?')
            timeStmp = match.group(1)

            tfa_form_strs = {
                u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
                u'smsToken': u'',
                u'smsUserPin': tfa_code,
                u'smsVerifyPin': u'Verify',

                u'PersistentCookie': u'yes',
                u'checkConnection': u'',
                u'checkedDomains': u'youtube',
                u'pstMsg': u'1',
                u'secTok': secTok,
                u'timeStmp': timeStmp,
                u'service': u'youtube',
                u'hl': u'en_US',
            }
            tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in tfa_form_strs.items())
            tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')

            tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
            tfa_results = self._download_webpage(
                tfa_req, None,
                note=u'Submitting TFA code', errnote=u'unable to submit tfa', fatal=False)

            if tfa_results is False:
                return False

            if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', tfa_results) is not None:
                self._downloader.report_warning(u'Two-factor code expired. Please try again, or use a one-use backup code instead.')
                return False
            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
                self._downloader.report_warning(u'unable to log in - did the page structure change?')
                return False
            if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
                self._downloader.report_warning(u'Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
                return False

        if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
            self._downloader.report_warning(u'unable to log in: bad username or password')
            return False
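The encode-then-urlencode step used for both forms above exists because, as the comment in an earlier hunk notes, Python 2's urlencode chokes on unicode; the same pattern in isolation, using the standard library directly instead of youtube-dl's compat wrappers (field values are placeholders):

    try:
        from urllib.parse import urlencode   # Python 3
    except ImportError:
        from urllib import urlencode         # Python 2, which compat_urllib_parse wraps

    form = {u'Email': u'user@example.com', u'PersistentCookie': u'yes'}
    # Encode keys and values to UTF-8 first, then urlencode, then force ASCII bytes for the request body.
    data = urlencode(dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in form.items())).encode('ascii')
    print(data)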
youtube_dl/utils.py
@@ -855,6 +855,7 @@ def unified_strdate(date_str):
        '%Y/%m/%d',
        '%d.%m.%Y',
        '%d/%m/%Y',
        '%d/%m/%y',
        '%Y/%m/%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S',
        '%d.%m.%Y %H:%M',
youtube_dl/version.py
@@ -1,2 +1,2 @@
 
-__version__ = '2014.08.24.2'
+__version__ = '2014.08.24.6'