X-Git-Url: http://git.cielonegro.org/gitweb.cgi?a=blobdiff_plain;f=youtube_dl%2FInfoExtractors.py;h=baf859ea8beac248c7cc9caf0a525a71ae4368fb;hb=bae611f216ac7b1f1a24a506da6dffc518d09d5b;hp=8c5b7256788f966804cfa2da57dee020f47c1fec;hpb=58ca755f4062146fbb9ce9d4383f033b8a631ed0;p=youtube-dl.git

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 8c5b72567..baf859ea8 100644
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -12,29 +12,15 @@ import time
 import urllib
 import urllib2
 import email.utils
+import xml.etree.ElementTree
+from urlparse import parse_qs
 
 try:
     import cStringIO as StringIO
 except ImportError:
     import StringIO
 
-# parse_qs was moved from the cgi module to the urlparse module recently.
-try:
-    from urlparse import parse_qs
-except ImportError:
-    from cgi import parse_qs
-
-try:
-    import lxml.etree
-except ImportError:
-    pass # Handled below
-
-try:
-    import xml.etree.ElementTree
-except ImportError: # Python<2.5: Not officially supported, but let it slip
-    warnings.warn('xml.etree.ElementTree support is missing. Consider upgrading to Python >= 2.5 if you get related errors.')
-
-from Utils import *
+from utils import *
 
 
 class InfoExtractor(object):
@@ -53,7 +39,6 @@ class InfoExtractor(object):
     url:        Final video URL.
     uploader:   Nickname of the video uploader.
     title:      Literal title.
-    stitle:     Simplified title.
     ext:        Video filename extension.
     format:     Video format.
     player_url: SWF Player URL (may be None).
@@ -117,8 +102,8 @@ class YoutubeIE(InfoExtractor):
     _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
     _NETRC_MACHINE = 'youtube'
     # Listed in order of quality
-    _available_formats = ['38', '37', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
-    _available_formats_prefer_free = ['38', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
+    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
+    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
     _video_extensions = {
         '13': '3gp',
         '17': 'mp4',
@@ -129,6 +114,7 @@ class YoutubeIE(InfoExtractor):
         '43': 'webm',
         '44': 'webm',
         '45': 'webm',
+        '46': 'webm',
     }
     _video_dimensions = {
         '5': '240x400',
@@ -144,6 +130,7 @@ class YoutubeIE(InfoExtractor):
         '43': '360x640',
         '44': '480x854',
         '45': '720x1280',
+        '46': '1080x1920',
     }
 
     IE_NAME = u'youtube'
@@ -193,9 +180,9 @@ class YoutubeIE(InfoExtractor):
             end = start + float(dur)
             start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000)
             end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000)
-            caption = re.sub(ur'(?u)&(.+?);', htmlentity_transform, caption)
-            caption = re.sub(ur'(?u)&(.+?);', htmlentity_transform, caption) # double cycle, inentional
-            srt += str(n) + '\n'
+            caption = unescapeHTML(caption)
+            caption = unescapeHTML(caption) # double cycle, intentional
+            srt += str(n+1) + '\n'
             srt += start + ' --> ' + end + '\n'
             srt += caption + '\n\n'
         return srt
@@ -324,6 +311,11 @@ class YoutubeIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
             return
 
+        # Check for "rental" videos
+        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
+            self._downloader.trouble(u'ERROR: "rental" videos not supported')
+            return
+
         # Start extracting information
         self.report_information_extraction(video_id)
 
@@ -339,10 +331,6 @@ class YoutubeIE(InfoExtractor):
             return
         video_title = urllib.unquote_plus(video_info['title'][0])
         video_title = video_title.decode('utf-8')
-        video_title = sanitize_title(video_title)
-
-        # simplified title
-        simple_title = simplify_title(video_title)
 
         # thumbnail image
         if 'thumbnail_url' not in video_info:
@@ -364,49 +352,39 @@ class YoutubeIE(InfoExtractor):
             pass
 
         # description
-        try:
-            lxml.etree
-        except NameError:
-            video_description = u'No description available.'
-            mobj = re.search(r'', video_webpage)
-            if mobj is not None:
-                video_description = mobj.group(1).decode('utf-8')
-        else:
-            html_parser = lxml.etree.HTMLParser(encoding='utf-8')
-            vwebpage_doc = lxml.etree.parse(StringIO.StringIO(video_webpage), html_parser)
-            video_description = u''.join(vwebpage_doc.xpath('id("eow-description")//text()'))
-            # TODO use another parser
+        video_description = get_element_by_id("eow-description", video_webpage.decode('utf8'))
+        if video_description: video_description = clean_html(video_description)
+        else: video_description = ''
 
         # closed captions
         video_subtitles = None
         if self._downloader.params.get('writesubtitles', False):
-            self.report_video_subtitles_download(video_id)
-            request = urllib2.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
             try:
-                srt_list = urllib2.urlopen(request).read()
-            except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                self._downloader.trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
-            else:
+                self.report_video_subtitles_download(video_id)
+                request = urllib2.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
+                try:
+                    srt_list = urllib2.urlopen(request).read()
+                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
                 srt_lang_list = re.findall(r'lang_code="([\w\-]+)"', srt_list)
-                if srt_lang_list:
-                    if self._downloader.params.get('subtitleslang', False):
-                        srt_lang = self._downloader.params.get('subtitleslang')
-                    elif 'en' in srt_lang_list:
-                        srt_lang = 'en'
-                    else:
-                        srt_lang = srt_lang_list[0]
-                    if not srt_lang in srt_lang_list:
-                        self._downloader.trouble(u'WARNING: no closed captions found in the specified language')
-                    else:
-                        request = urllib2.Request('http://video.google.com/timedtext?hl=en&lang=%s&v=%s' % (srt_lang, video_id))
-                        try:
-                            srt_xml = urllib2.urlopen(request).read()
-                        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                            self._downloader.trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
-                        else:
-                            video_subtitles = self._closed_captions_xml_to_srt(srt_xml.decode('utf-8'))
+                if not srt_lang_list:
+                    raise Trouble(u'WARNING: video has no closed captions')
+                if self._downloader.params.get('subtitleslang', False):
+                    srt_lang = self._downloader.params.get('subtitleslang')
+                elif 'en' in srt_lang_list:
+                    srt_lang = 'en'
                 else:
-                    self._downloader.trouble(u'WARNING: video has no closed captions')
+                    srt_lang = srt_lang_list[0]
+                if not srt_lang in srt_lang_list:
+                    raise Trouble(u'WARNING: no closed captions found in the specified language')
+                request = urllib2.Request('http://video.google.com/timedtext?hl=en&lang=%s&v=%s' % (srt_lang, video_id))
+                try:
+                    srt_xml = urllib2.urlopen(request).read()
+                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
+                video_subtitles = self._closed_captions_xml_to_srt(srt_xml.decode('utf-8'))
+            except Trouble as trouble:
+                self._downloader.trouble(trouble[0])
 
         # token
         video_token = urllib.unquote_plus(video_info['token'][0])
@@ -469,7 +447,6 @@ class YoutubeIE(InfoExtractor):
                 'uploader': video_uploader.decode('utf-8'),
                 'upload_date': upload_date,
                 'title': video_title,
-                'stitle': simple_title,
                 'ext': video_extension.decode('utf-8'),
                 'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
                 'thumbnail': video_thumbnail.decode('utf-8'),
@@ -545,8 +522,6 @@ class MetacafeIE(InfoExtractor):
             self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)])
             return
 
-        simple_title = mobj.group(2).decode('utf-8')
-
         # Retrieve video webpage to extract further information
         request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
         try:
@@ -592,7 +567,6 @@ class MetacafeIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: unable to extract title')
             return
         video_title = mobj.group(1).decode('utf-8')
-        video_title = sanitize_title(video_title)
 
         mobj = re.search(r'(?ms)By:\s*(.+?)<', webpage)
         if mobj is None:
@@ -606,7 +580,6 @@ class MetacafeIE(InfoExtractor):
             'uploader': video_uploader.decode('utf-8'),
             'upload_date': u'NA',
             'title': video_title,
-            'stitle': simple_title,
             'ext': video_extension.decode('utf-8'),
             'format': u'NA',
             'player_url': None,
@@ -673,8 +646,6 @@ class DailymotionIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: unable to extract title')
             return
         video_title = unescapeHTML(mobj.group('title').decode('utf-8'))
-        video_title = sanitize_title(video_title)
-        simple_title = simplify_title(video_title)
 
         mobj = re.search(r'(?im)[^<]+?]+?>([^<]+?)', webpage)
         if mobj is None:
@@ -688,7 +659,6 @@ class DailymotionIE(InfoExtractor):
             'uploader': video_uploader.decode('utf-8'),
             'upload_date': u'NA',
             'title': video_title,
-            'stitle': simple_title,
             'ext': video_extension.decode('utf-8'),
             'format': u'NA',
             'player_url': None,
@@ -752,8 +722,6 @@ class GoogleIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: unable to extract title')
             return
         video_title = mobj.group(1).decode('utf-8')
-        video_title = sanitize_title(video_title)
-        simple_title = simplify_title(video_title)
 
         # Extract video description
         mobj = re.search(r'([^<]*)', webpage)
@@ -786,7 +754,6 @@ class GoogleIE(InfoExtractor):
             'uploader': u'NA',
             'upload_date': u'NA',
             'title': video_title,
-            'stitle': simple_title,
             'ext': video_extension.decode('utf-8'),
             'format': u'NA',
             'player_url': None,
@@ -845,8 +812,6 @@ class PhotobucketIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: unable to extract title')
             return
         video_title = mobj.group(1).decode('utf-8')
-        video_title = sanitize_title(video_title)
-        simple_title = simplify_title(video_title)
 
         video_uploader = mobj.group(2).decode('utf-8')
 
@@ -856,7 +821,6 @@ class PhotobucketIE(InfoExtractor):
             'uploader': video_uploader,
             'upload_date': u'NA',
             'title': video_title,
-            'stitle': simple_title,
             'ext': video_extension.decode('utf-8'),
             'format': u'NA',
             'player_url': None,
@@ -934,7 +898,6 @@ class YahooIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: unable to extract video title')
             return
         video_title = mobj.group(1).decode('utf-8')
-        simple_title = simplify_title(video_title)
 
         mobj = re.search(r'(.*)', webpage)
         if mobj is None:
@@ -992,7 +955,7 @@ class YahooIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: Unable to extract media URL')
             return
         video_url = urllib.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
-        video_url = re.sub(r'(?u)&(.+?);', htmlentity_transform, video_url)
+        video_url = unescapeHTML(video_url)
 
         return [{
             'id': video_id.decode('utf-8'),
@@ -1000,7 +963,6 @@ class YahooIE(InfoExtractor):
             'uploader': video_uploader,
             'upload_date': u'NA',
             'title': video_title,
-            'stitle': simple_title,
             'ext': video_extension.decode('utf-8'),
             'thumbnail': video_thumbnail.decode('utf-8'),
             'description': video_description,
@@ -1060,7 +1022,6 @@ class VimeoIE(InfoExtractor):
 
         # Extract title
         video_title = config["video"]["title"]
-        simple_title = simplify_title(video_title)
 
         # Extract uploader
         video_uploader = config["video"]["owner"]["name"]
@@ -1069,18 +1030,9 @@ class VimeoIE(InfoExtractor):
         video_thumbnail = config["video"]["thumbnail"]
 
         # Extract video description
-        try:
-            lxml.etree
-        except NameError:
-            video_description = u'No description available.'
-            mobj = re.search(r'', webpage, re.MULTILINE)
-            if mobj is not None:
-                video_description = mobj.group(1)
-        else:
-            html_parser = lxml.etree.HTMLParser()
-            vwebpage_doc = lxml.etree.parse(StringIO.StringIO(webpage), html_parser)
-            video_description = u''.join(vwebpage_doc.xpath('id("description")//text()')).strip()
-            # TODO use another parser
+        video_description = get_element_by_id("description", webpage.decode('utf8'))
+        if video_description: video_description = clean_html(video_description)
+        else: video_description = ''
 
         # Extract upload date
         video_upload_date = u'NA'
@@ -1115,7 +1067,6 @@ class VimeoIE(InfoExtractor):
             'uploader': video_uploader,
             'upload_date': video_upload_date,
             'title': video_title,
-            'stitle': simple_title,
             'ext': video_extension,
             'thumbnail': video_thumbnail,
             'description': video_description,
@@ -1158,16 +1109,16 @@ class GenericIE(InfoExtractor):
             """
             def redirect_request(self, req, fp, code, msg, headers, newurl):
                 if code in (301, 302, 303, 307):
-                    newurl = newurl.replace(' ', '%20')
-                    newheaders = dict((k,v) for k,v in req.headers.items()
-                                      if k.lower() not in ("content-length", "content-type"))
-                    return HeadRequest(newurl,
-                                       headers=newheaders,
-                                       origin_req_host=req.get_origin_req_host(),
-                                       unverifiable=True)
+                    newurl = newurl.replace(' ', '%20')
+                    newheaders = dict((k,v) for k,v in req.headers.items()
+                        if k.lower() not in ("content-length", "content-type"))
+                    return HeadRequest(newurl,
+                        headers=newheaders,
+                        origin_req_host=req.get_origin_req_host(),
+                        unverifiable=True)
                 else:
-                    raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
-
+                    raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
+
         class HTTPMethodFallback(urllib2.BaseHandler):
             """
             Fallback to GET if HEAD is not allowed (405 HTTP error)
             """
@@ -1177,17 +1128,17 @@ class GenericIE(InfoExtractor):
                 fp.close()
 
                 newheaders = dict((k,v) for k,v in req.headers.items()
-                                  if k.lower() not in ("content-length", "content-type"))
+                    if k.lower() not in ("content-length", "content-type"))
                 return self.parent.open(urllib2.Request(req.get_full_url(),
-                                                        headers=newheaders,
-                                                        origin_req_host=req.get_origin_req_host(),
-                                                        unverifiable=True))
+                    headers=newheaders,
+                    origin_req_host=req.get_origin_req_host(),
+                    unverifiable=True))
 
         # Build our opener
         opener = urllib2.OpenerDirector()
         for handler in [urllib2.HTTPHandler, urllib2.HTTPDefaultErrorHandler,
-                        HTTPMethodFallback, HEADRedirectHandler,
-                        urllib2.HTTPErrorProcessor, urllib2.HTTPSHandler]:
+            HTTPMethodFallback, HEADRedirectHandler,
+            urllib2.HTTPErrorProcessor, urllib2.HTTPSHandler]:
             opener.add_handler(handler())
 
         response = opener.open(HeadRequest(url))
@@ -1250,8 +1201,6 @@ class GenericIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: unable to extract title')
             return
         video_title = mobj.group(1).decode('utf-8')
-        video_title = sanitize_title(video_title)
-        simple_title = simplify_title(video_title)
 
         # video uploader is domain name
         mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
@@ -1266,7 +1215,6 @@ class GenericIE(InfoExtractor):
             'uploader': video_uploader,
             'upload_date': u'NA',
             'title': video_title,
-            'stitle': simple_title,
             'ext': video_extension.decode('utf-8'),
             'format': u'NA',
             'player_url': None,
@@ -1284,7 +1232,7 @@ class YoutubeSearchIE(InfoExtractor):
         InfoExtractor.__init__(self, downloader)
 
     def report_download_page(self, query, pagenum):
-        """Report attempt to download playlist page with given number."""
+        """Report attempt to download search page with given number."""
         query = query.decode(preferredencoding())
         self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
@@ -1520,8 +1468,8 @@ class YoutubePlaylistIE(InfoExtractor):
     _VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
     _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
-    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&list=PL%s&'
-    _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*'
+    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&list=(PL)?%s&'
+    _MORE_PAGES_INDICATOR = r'yt-uix-pager-next'
     IE_NAME = u'youtube:playlist'
 
     def __init__(self, downloader=None):
@@ -1731,7 +1679,6 @@ class DepositFilesIE(InfoExtractor):
             'uploader': u'NA',
             'upload_date': u'NA',
             'title': file_title,
-            'stitle': file_title,
             'ext': file_extension.decode('utf-8'),
             'format': u'NA',
             'player_url': None,
@@ -1876,9 +1823,6 @@ class FacebookIE(InfoExtractor):
             return
         video_title = video_info['title']
         video_title = video_title.decode('utf-8')
-        video_title = sanitize_title(video_title)
-
-        simple_title = simplify_title(video_title)
 
         # thumbnail image
         if 'thumbnail' not in video_info:
@@ -1939,7 +1883,6 @@ class FacebookIE(InfoExtractor):
             'uploader': video_uploader.decode('utf-8'),
             'upload_date': upload_date,
             'title': video_title,
-            'stitle': simple_title,
             'ext': video_extension.decode('utf-8'),
             'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
             'thumbnail': video_thumbnail.decode('utf-8'),
@@ -1989,7 +1932,6 @@ class BlipTVIE(InfoExtractor):
                 'id': title,
                 'url': url,
                 'title': title,
-                'stitle': simplify_title(title),
                 'ext': ext,
                 'urlhandle': urlh
             }
@@ -2009,21 +1951,20 @@ class BlipTVIE(InfoExtractor):
                 data = json_data['Post']
             else:
                 data = json_data
-            
+
             upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
             video_url = data['media']['url']
             umobj = re.match(self._URL_EXT, video_url)
             if umobj is None:
                 raise ValueError('Can not determine filename extension')
             ext = umobj.group(1)
-            
+
             info = {
                 'id': data['item_id'],
                 'url': video_url,
                 'uploader': data['display_name'],
                 'upload_date': upload_date,
                 'title': data['title'],
-                'stitle': simplify_title(data['title']),
                 'ext': ext,
                 'format': data['media']['mimeType'],
                 'thumbnail': data['thumbnailUrl'],
@@ -2085,9 +2026,6 @@ class MyVideoIE(InfoExtractor):
             return
         video_title = mobj.group(1)
-        video_title = sanitize_title(video_title)
-
-        simple_title = simplify_title(video_title)
 
         return [{
             'id': video_id,
@@ -2095,7 +2033,6 @@ class MyVideoIE(InfoExtractor):
             'uploader': u'NA',
             'upload_date': u'NA',
             'title': video_title,
-            'stitle': simple_title,
             'ext': u'flv',
             'format': u'NA',
             'player_url': None,
@@ -2109,7 +2046,7 @@ class ComedyCentralIE(InfoExtractor):
 
     def report_extraction(self, episode_id):
         self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
-    
+
     def report_config_download(self, episode_id):
         self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
@@ -2222,7 +2159,6 @@ class ComedyCentralIE(InfoExtractor):
             'uploader': showId,
             'upload_date': officialDate,
             'title': effTitle,
-            'stitle': simplify_title(effTitle),
             'ext': 'mp4',
             'format': format,
             'thumbnail': None,
@@ -2248,8 +2184,6 @@ class EscapistIE(InfoExtractor):
         self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName)
 
     def _real_extract(self, url):
-        htmlParser = HTMLParser.HTMLParser()
-
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
@@ -2259,17 +2193,18 @@ class EscapistIE(InfoExtractor):
         self.report_extraction(showName)
         try:
-            webPage = urllib2.urlopen(url).read()
+            webPageBytes = urllib2.urlopen(url).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
             self._downloader.trouble(u'ERROR: unable to download webpage: ' + unicode(err))
             return
+        webPage = webPageBytes.decode('utf-8')
         descMatch = re.search('([^<]+)', coursepage)
         if m:
@@ -2822,8 +2740,6 @@ class StanfordOpenClassroomIE(InfoExtractor):
             return results
         else: # Root page
-            unescapeHTML = HTMLParser.HTMLParser().unescape
-
             info = {
                 'id': 'Stanford OpenClassroom',
                 'type': 'playlist',
@@ -2838,7 +2754,6 @@ class StanfordOpenClassroomIE(InfoExtractor):
                 return
             info['title'] = info['id']
-            info['stitle'] = simplify_title(info['title'])
 
             links = orderedSet(re.findall('', rootpage))
             info['list'] = [
@@ -2937,7 +2852,6 @@ class MTVIE(InfoExtractor):
             'url': video_url,
             'uploader': performer,
             'title': video_title,
-            'stitle': simplify_title(video_title),
             'ext': ext,
             'format': format,
         }