From: Filippo Valsorda
Date: Tue, 9 Oct 2012 08:48:49 +0000 (+0200)
Subject: Merge PR #422 from 'kevinamadeus/master'
X-Git-Url: https://git.cielonegro.org/gitweb.cgi?a=commitdiff_plain;h=fd873c69a42eb596290ec53e53ac802298128095;p=youtube-dl.git

Merge PR #422 from 'kevinamadeus/master'

Add InfoExtractor for Google Plus video (with fixes)
---

fd873c69a42eb596290ec53e53ac802298128095
diff --cc youtube_dl/InfoExtractors.py
index 9e5ea7c61,ddb9fbca1..f97611cb9
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@@ -2988,189 -2956,126 +2988,313 @@@ class MTVIE(InfoExtractor)
          return [info]
 +
 +class YoukuIE(InfoExtractor):
 +
 +    _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
 +    IE_NAME = u'Youku'
 +
 +    def __init__(self, downloader=None):
 +        InfoExtractor.__init__(self, downloader)
 +
 +    def report_download_webpage(self, file_id):
 +        """Report webpage download."""
 +        self._downloader.to_screen(u'[Youku] %s: Downloading webpage' % file_id)
 +
 +    def report_extraction(self, file_id):
 +        """Report information extraction."""
 +        self._downloader.to_screen(u'[Youku] %s: Extracting information' % file_id)
 +
 +    def _gen_sid(self):
 +        nowTime = int(time.time() * 1000)
 +        random1 = random.randint(1000,1998)
 +        random2 = random.randint(1000,9999)
 +
 +        return "%d%d%d" %(nowTime,random1,random2)
 +
 +    def _get_file_ID_mix_string(self, seed):
 +        mixed = []
 +        source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
 +        seed = float(seed)
 +        for i in range(len(source)):
 +            seed = (seed * 211 + 30031 ) % 65536
 +            index = math.floor(seed / 65536 * len(source) )
 +            mixed.append(source[int(index)])
 +            source.remove(source[int(index)])
 +        #return ''.join(mixed)
 +        return mixed
 +
 +    def _get_file_id(self, fileId, seed):
 +        mixed = self._get_file_ID_mix_string(seed)
 +        ids = fileId.split('*')
 +        realId = []
 +        for ch in ids:
 +            if ch:
 +                realId.append(mixed[int(ch)])
 +        return ''.join(realId)
 +
 +    def _real_extract(self, url):
 +        mobj = re.match(self._VALID_URL, url)
 +        if mobj is None:
 +            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
 +            return
 +        video_id = mobj.group('ID')
 +
 +        info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
 +
 +        request = urllib2.Request(info_url, None, std_headers)
 +        try:
 +            self.report_download_webpage(video_id)
 +            jsondata = urllib2.urlopen(request).read()
 +        except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 +            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
 +            return
 +
 +        self.report_extraction(video_id)
 +        try:
 +            config = json.loads(jsondata)
 +
 +            video_title = config['data'][0]['title']
 +            seed = config['data'][0]['seed']
 +
 +            format = self._downloader.params.get('format', None)
 +            supported_format = config['data'][0]['streamfileids'].keys()
 +
 +            if format is None or format == 'best':
 +                if 'hd2' in supported_format:
 +                    format = 'hd2'
 +                else:
 +                    format = 'flv'
 +                ext = u'flv'
 +            elif format == 'worst':
 +                format = 'mp4'
 +                ext = u'mp4'
 +            else:
 +                format = 'flv'
 +                ext = u'flv'
 +
 +
 +            fileid = config['data'][0]['streamfileids'][format]
 +            seg_number = len(config['data'][0]['segs'][format])
 +
 +            keys=[]
 +            for i in xrange(seg_number):
 +                keys.append(config['data'][0]['segs'][format][i]['k'])
 +
 +            #TODO check error
 +            #youku only could be viewed from mainland china
 +        except:
 +            self._downloader.trouble(u'ERROR: unable to extract info section')
 +            return
 +
 +        files_info=[]
 +        sid = self._gen_sid()
 +        fileid = self._get_file_id(fileid, seed)
 +
 +        #column 8,9 of fileid represent the segment number
 +        #fileid[7:9] should be changed
 +        for index, key in enumerate(keys):
 +
 +            temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
 +            download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
 +
 +            info = {
 +                'id': '%s_part%02d' % (video_id, index),
 +                'url': download_url,
 +                'uploader': None,
 +                'title': video_title,
 +                'ext': ext,
 +                'format': u'NA'
 +            }
 +            files_info.append(info)
 +
 +        return files_info
 +
 +
 +class XNXXIE(InfoExtractor):
 +    """Information extractor for xnxx.com"""
 +
 +    _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)'
 +    IE_NAME = u'xnxx'
 +    VIDEO_URL_RE = r'flv_url=(.*?)&'
 +    VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
 +    VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&'
 +
 +    def report_webpage(self, video_id):
 +        """Report information extraction"""
 +        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
 +
 +    def report_extraction(self, video_id):
 +        """Report information extraction"""
 +        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
 +
 +    def _real_extract(self, url):
 +        mobj = re.match(self._VALID_URL, url)
 +        if mobj is None:
 +            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
 +            return
 +        video_id = mobj.group(1).decode('utf-8')
 +
 +        self.report_webpage(video_id)
 +
 +        # Get webpage content
 +        try:
 +            webpage = urllib2.urlopen(url).read()
 +        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
 +            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
 +            return
 +
 +        result = re.search(self.VIDEO_URL_RE, webpage)
 +        if result is None:
 +            self._downloader.trouble(u'ERROR: unable to extract video url')
 +            return
 +        video_url = urllib.unquote(result.group(1).decode('utf-8'))
 +
 +        result = re.search(self.VIDEO_TITLE_RE, webpage)
 +        if result is None:
 +            self._downloader.trouble(u'ERROR: unable to extract video title')
 +            return
 +        video_title = result.group(1).decode('utf-8')
 +
 +        result = re.search(self.VIDEO_THUMB_RE, webpage)
 +        if result is None:
 +            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
 +            return
 +        video_thumbnail = result.group(1).decode('utf-8')
 +
 +        info = {'id': video_id,
 +                'url': video_url,
 +                'uploader': None,
 +                'upload_date': None,
 +                'title': video_title,
 +                'ext': 'flv',
 +                'format': 'flv',
 +                'thumbnail': video_thumbnail,
 +                'description': None,
 +                'player_url': None}
 +
 +        return [info]
++
++
+ class GooglePlusIE(InfoExtractor):
+     """Information extractor for plus.google.com."""
+
 -    _VALID_URL = r'(?:https://)?plus\.google\.com/(\d+)/posts/(\w+)'
++    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:\w+/)*?(\d+)/posts/(\w+)'
+     IE_NAME = u'plus.google'
+
+     def __init__(self, downloader=None):
+         InfoExtractor.__init__(self, downloader)
+
+     def report_extract_entry(self, url):
+         """Report downloading entry"""
+         self._downloader.to_screen(u'[plus.google] Downloading entry: %s' % url.decode('utf-8'))
+
+     def report_date(self, upload_date):
+         """Report downloading entry"""
+         self._downloader.to_screen(u'[plus.google] Entry date: %s' % upload_date)
+
+     def report_uploader(self, uploader):
+         """Report downloading entry"""
+         self._downloader.to_screen(u'[plus.google] Uploader: %s' % uploader.decode('utf-8'))
+
+     def report_title(self, video_title):
+         """Report downloading entry"""
+         self._downloader.to_screen(u'[plus.google] Title: %s' % video_title.decode('utf-8'))
+
+     def report_extract_vid_page(self, video_page):
+         """Report information extraction."""
+         self._downloader.to_screen(u'[plus.google] Extracting video page: %s' % video_page.decode('utf-8'))
+
+     def _real_extract(self, url):
+         # Extract id from URL
+         mobj = re.match(self._VALID_URL, url)
+         if mobj is None:
+             self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+             return
+
+         post_url = mobj.group(0)
+         video_id = mobj.group(2)
+
+         video_extension = 'flv'
+
+         # Step 1, Retrieve post webpage to extract further information
++        self.report_extract_entry(post_url)
+         request = urllib2.Request(post_url)
+         try:
 -            self.report_extract_entry(post_url)
+             webpage = urllib2.urlopen(request).read()
+         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+             self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % str(err))
+             return
+
+         # Extract update date
+         upload_date = u'NA'
+         pattern = 'title="Timestamp">(.*?)</a>'
+         mobj = re.search(pattern, webpage)
+         if mobj:
+             upload_date = mobj.group(1)
 -            """Convert timestring to a format suitable for filename"""
++            # Convert timestring to a format suitable for filename
+             upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
+             upload_date = upload_date.strftime('%Y%m%d')
+         self.report_date(upload_date)
+
+         # Extract uploader
+         uploader = u'NA'
+         pattern = r'rel\="author".*?>(.*?)</a>'
+         mobj = re.search(pattern, webpage)
+         if mobj:
+             uploader = mobj.group(1)
+         self.report_uploader(uploader)
+
+         # Extract title
 -        """Get the first line for title"""
++        # Get the first line for title
+         video_title = u'NA'
 -        pattern = r'<meta name\=\"Description\" content\=\"(.*?)[\s<"]'
++        pattern = r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]'
+         mobj = re.search(pattern, webpage)
+         if mobj:
+             video_title = mobj.group(1)
+         self.report_title(video_title)
+
+         # Step 2, Simulate clicking the image box to launch video
+         pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
+         mobj = re.search(pattern, webpage)
+         if mobj is None:
+             self._downloader.trouble(u'ERROR: unable to extract video page URL')
+
+         video_page = mobj.group(1)
+         request = urllib2.Request(video_page)
+         try:
+             webpage = urllib2.urlopen(request).read()
+         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+             self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+             return
+         self.report_extract_vid_page(video_page)
+
+
+         # Extract video links on video page
+         """Extract video links of all sizes"""
+         pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
+         mobj = re.findall(pattern, webpage)
 -        if mobj is None:
++        if len(mobj) == 0:
+             self._downloader.trouble(u'ERROR: unable to extract video links')
+
+         # Sort in resolution
+         links = sorted(mobj)
+
+         # Choose the lowest of the sort, i.e. highest resolution
+         video_url = links[-1]
+         # Only get the url. The resolution part in the tuple has no use anymore
+         video_url = video_url[-1]
+         # Treat escaped \u0026 style hex
 -        video_url = unicode(video_url, "unicode_escape").encode("utf8")
++        video_url = unicode(video_url, "unicode_escape")
+
+
+         return [{
+             'id': video_id.decode('utf-8'),
 -            'url': video_url.decode('utf-8'),
++            'url': video_url,
+             'uploader': uploader.decode('utf-8'),
+             'upload_date': upload_date.decode('utf-8'),
+             'title': video_title.decode('utf-8'),
+             'ext': video_extension.decode('utf-8'),
+             'format': u'NA',
+             'player_url': None,
+         }]
 -
diff --cc youtube_dl/__init__.py
index b21416daf,fc8101f82..15a3ec4cf
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@@ -353,8 -351,7 +353,9 @@@ def gen_extractors()
          MixcloudIE(),
          StanfordOpenClassroomIE(),
          MTVIE(),
 +        YoukuIE(),
 +        XNXXIE(),
+         GooglePlusIE(),
          GenericIE()
      ]
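
A quick standalone sketch, separate from the commit itself, of what the _VALID_URL change in GooglePlusIE buys. The two patterns below are copied from the removed and added lines of the diff; the URLs are made-up examples, not real posts. The PR's original pattern only accepts a bare numeric user id after the host, while the merged pattern also tolerates extra path segments such as /u/0/ in front of it.

    import re

    OLD = r'(?:https://)?plus\.google\.com/(\d+)/posts/(\w+)'            # pattern removed above
    NEW = r'(?:https://)?plus\.google\.com/(?:\w+/)*?(\d+)/posts/(\w+)'  # pattern added above

    urls = [
        'https://plus.google.com/1234567890/posts/abcde',      # bare numeric user id (made up)
        'https://plus.google.com/u/0/1234567890/posts/abcde',  # same post behind /u/0/ (made up)
    ]

    for url in urls:
        # re.match anchors at the start of the string, like InfoExtractor.suitable()
        print('%-55s old=%-5s new=%s' % (url, bool(re.match(OLD, url)), bool(re.match(NEW, url))))

The old pattern matches only the first URL; the merged one matches both, so posts reached through a /u/0/-style link are also picked up once GooglePlusIE is registered in gen_extractors() (second hunk of the diff).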