Merge remote-tracking branch 'jefftimesten/master'

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 905e212b099319f96415a8af56c63fc27d28ecd4..092bfef22ba7cdf7e4847ebc562bae0814037141 100755
@@ -14,10 +14,6 @@ import email.utils
 import xml.etree.ElementTree
 import random
 import math
-import urllib
-import urllib2
-import httplib
-from urlparse import parse_qs, urlparse
 
 from .utils import *
 
@@ -2337,7 +2333,6 @@ class ComedyCentralIE(InfoExtractor):
                               (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
                               |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))))
                      $"""
-    IE_NAME = u'comedycentral'
 
     _available_formats = ['3500', '2200', '1700', '1200', '750', '400']
 
@@ -2365,16 +2360,12 @@ class ComedyCentralIE(InfoExtractor):
     def report_extraction(self, episode_id):
         self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
 
-    def report_config_download(self, episode_id):
-        self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
+    def report_config_download(self, episode_id, media_id):
+        self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration for %s' % (episode_id, media_id))
 
     def report_index_download(self, episode_id):
         self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id)
 
-    def report_player_url(self, episode_id):
-        self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)
-
-
     def _print_formats(self, formats):
         print('Available formats:')
         for x in formats:
@@ -2413,6 +2404,7 @@ class ComedyCentralIE(InfoExtractor):
         try:
             htmlHandle = compat_urllib_request.urlopen(req)
             html = htmlHandle.read()
+            webpage = html.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
             return
@@ -2427,29 +2419,20 @@ class ComedyCentralIE(InfoExtractor):
                 return
             epTitle = mobj.group('episode')
 
-        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', html)
+        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
 
         if len(mMovieParams) == 0:
             # The Colbert Report embeds the information in a without
             # a URL prefix; so extract the alternate reference
             # and then add the URL prefix manually.
 
-            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', html)
+            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
             if len(altMovieParams) == 0:
                 self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
                 return
             else:
                 mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
 
-        playerUrl_raw = mMovieParams[0][0]
-        self.report_player_url(epTitle)
-        try:
-            urlHandle = compat_urllib_request.urlopen(playerUrl_raw)
-            playerUrl = urlHandle.geturl()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to find out player URL: ' + compat_str(err))
-            return
-
         uri = mMovieParams[0][1]
         indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
         self.report_index_download(epTitle)
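
For reference, a minimal standalone sketch of the fallback path in the hunk above: when no <param name="movie"> URL is present, the data-mgid attribute is extracted and the media.mtvnservices.com prefix is added by hand before the mrss index URL is built. The sample markup and mgid below are invented, and urllib.parse stands in for the codebase's compat_urllib_parse:

    import re
    import urllib.parse  # stands in for compat_urllib_parse

    # Invented page fragment: a Colbert Report-style embed that only exposes a
    # data-mgid attribute, with no <param name="movie"> URL.
    webpage = '<div class="video" data-mgid="mgid:cms:episode:thedailyshow.com:12345"></div>'

    altMovieParams = re.findall(r'data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
    if altMovieParams:
        # Add the URL prefix manually, as the extractor does.
        mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
        uri = mMovieParams[0][1]
        indexUrl = ('http://shadow.comedycentral.com/feeds/video_player/mrss/?'
                    + urllib.parse.urlencode({'uri': uri}))
        print(indexUrl)
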
@@ -2463,7 +2446,7 @@ class ComedyCentralIE(InfoExtractor):
 
         idoc = xml.etree.ElementTree.fromstring(indexXml)
         itemEls = idoc.findall('.//item')
-        for itemEl in itemEls:
+        for partNum,itemEl in enumerate(itemEls):
             mediaId = itemEl.findall('./guid')[0].text
             shortMediaId = mediaId.split(':')[-1]
             showId = mediaId.split(':')[-2].replace('.com', '')
@@ -2473,7 +2456,7 @@ class ComedyCentralIE(InfoExtractor):
             configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
                         compat_urllib_parse.urlencode({'uri': mediaId}))
             configReq = compat_urllib_request.Request(configUrl)
-            self.report_config_download(epTitle)
+            self.report_config_download(epTitle, shortMediaId)
             try:
                 configXml = compat_urllib_request.urlopen(configReq).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
@@ -2495,7 +2478,7 @@ class ComedyCentralIE(InfoExtractor):
                 return
 
             # For now, just pick the highest bitrate
-            format,video_url = turls[-1]
+            format,rtmp_video_url = turls[-1]
 
             # Get the format arg from the arg stream
             req_format = self._downloader.params.get('format', None)
@@ -2503,18 +2486,16 @@ class ComedyCentralIE(InfoExtractor):
             # Select format if we can find one
             for f,v in turls:
                 if f == req_format:
-                    format, video_url = f, v
+                    format, rtmp_video_url = f, v
                     break
 
-            # Patch to download from alternative CDN, which does not
-            # break on current RTMPDump builds
-            broken_cdn = "rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/"
-            better_cdn = "rtmpe://cp10740.edgefcs.net/ondemand/mtvnorigin/gsp.comedystor/"
+            m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp.comedystor/.*)$', rtmp_video_url)
+            if not m:
+                raise ExtractorError(u'Cannot transform RTMP url')
+            base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
+            video_url = base + m.group('finalid')
 
-            if video_url.startswith(broken_cdn):
-                video_url = video_url.replace(broken_cdn, better_cdn)
-
-            effTitle = showId + u'-' + epTitle
+            effTitle = showId + u'-' + epTitle + u' part ' + compat_str(partNum+1)
             info = {
                 'id': shortMediaId,
                 'url': video_url,
@@ -2525,9 +2506,7 @@ class ComedyCentralIE(InfoExtractor):
                 'format': format,
                 'thumbnail': None,
                 'description': officialTitle,
-                'player_url': None #playerUrl
             }
-
             results.append(info)
 
         return results
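
The core of this hunk is the new CDN handling: rather than rewriting one rtmpe host to another, the RTMP URL is mapped onto a progressive-HTTP mirror by reusing everything after the gsp.comedystor path component. A self-contained sketch of that transform, using an invented (format, rtmp_url) list in place of the parsed mediagen response and ValueError in place of ExtractorError:

    import re

    # Invented (format, rtmp_url) pairs standing in for the parsed mediagen
    # response; the URLs follow the gsp.comedystor layout matched below.
    turls = [
        ('750', 'rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/com/dailyshow/clip_750.mp4'),
        ('3500', 'rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/com/dailyshow/clip_3500.mp4'),
    ]

    # For now, just pick the highest bitrate (last entry).
    format, rtmp_video_url = turls[-1]

    # Map the RTMP URL onto the progressive-HTTP mirror.
    m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp.comedystor/.*)$', rtmp_video_url)
    if not m:
        raise ValueError('Cannot transform RTMP url')  # ExtractorError in the extractor
    base = ('http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined'
            '+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/')
    video_url = base + m.group('finalid')
    print(format, video_url)
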
@@ -2607,7 +2586,6 @@ class EscapistIE(InfoExtractor):
 
         return [info]
 
-
 class CollegeHumorIE(InfoExtractor):
     """Information extractor for collegehumor.com"""
 
@@ -3546,17 +3524,23 @@ class JustinTVIE(InfoExtractor):
             return
 
         response = json.loads(webpage)
+        if type(response) != list:
+            error_text = response.get('error', 'unknown error')
+            self._downloader.trouble(u'ERROR: Justin.tv API: %s' % error_text)
+            return
         info = []
         for clip in response:
             video_url = clip['video_file_url']
             if video_url:
                 video_extension = os.path.splitext(video_url)[1][1:]
-                video_date = re.sub('-', '', clip['created_on'][:10])
+                video_date = re.sub('-', '', clip['start_time'][:10])
+                video_uploader_id = clip.get('user_id', clip.get('channel_id'))
                 info.append({
                     'id': clip['id'],
                     'url': video_url,
                     'title': clip['title'],
-                    'uploader': clip.get('user_id', clip.get('channel_id')),
+                    'uploader': clip.get('channel_name', video_uploader_id),
+                    'uploader_id': video_uploader_id,
                     'upload_date': video_date,
                     'ext': video_extension,
                 })
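
The added type check reflects that the Justin.tv API returns a JSON list of clips on success but a JSON object with an "error" field on failure. A small sketch of both branches, with invented payloads shaped like the fields the loop above reads:

    import json

    # Invented payloads: a failure object with an 'error' key and a success
    # list of clip dicts carrying the fields used above.
    error_payload = '{"error": "channel not found"}'
    ok_payload = ('[{"id": 1234, "video_file_url": "http://example.invalid/clip.flv",'
                  ' "title": "demo", "start_time": "2013-01-02T03:04:05Z",'
                  ' "channel_name": "somechannel", "user_id": 42}]')

    for raw in (error_payload, ok_payload):
        response = json.loads(raw)
        if type(response) != list:
            print('ERROR: Justin.tv API: %s' % response.get('error', 'unknown error'))
            continue
        for clip in response:
            video_date = clip['start_time'][:10].replace('-', '')
            video_uploader_id = clip.get('user_id', clip.get('channel_id'))
            print(clip['id'], video_date, clip.get('channel_name', video_uploader_id))
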
@@ -3575,7 +3559,7 @@ class JustinTVIE(InfoExtractor):
             paged = True
             api += '/channel/archives/%s.json'
         else:
-            api += '/clip/show/%s.json'
+            api += '/broadcast/by_archive/%s.json'
         api = api % (video_id,)
 
         self.report_extraction(video_id)
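
A hypothetical reconstruction of how the endpoint change above is used: channel pages go through the paged /channel/archives feed, while a single recorded video is now fetched via /broadcast/by_archive instead of /clip/show. The API base URL below is an assumption (it is not shown in this hunk); only the two path templates come from the change:

    # Assumed API base (not shown in this hunk).
    api_base = 'http://api.justin.tv/api'

    def build_api_url(video_id, is_channel):
        api = api_base
        if is_channel:
            # channel archives are paged
            api += '/channel/archives/%s.json'
        else:
            # a single recorded broadcast, looked up by archive id
            api += '/broadcast/by_archive/%s.json'
        return api % (video_id,)

    print(build_api_url('somechannel', True))
    print(build_api_url('123456789', False))
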
@@ -3715,11 +3699,11 @@ class SteamIE(InfoExtractor):
                   }
             videos.append(info)
         return videos
-        
+
 class UstreamIE(InfoExtractor):
-    _VALID_URL = r'http://www.ustream.tv/recorded/(?P<videoID>\d+)'
+    _VALID_URL = r'https?://www\.ustream\.tv/recorded/(?P<videoID>\d+)'
     IE_NAME = u'ustream'
-    
+
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
         video_id = m.group('videoID')
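
The tightened pattern accepts both http and https and makes the dots literal. A quick check of the new _VALID_URL against an illustrative recorded-video URL:

    import re

    _VALID_URL = r'https?://www\.ustream\.tv/recorded/(?P<videoID>\d+)'

    # Illustrative URL; the video id is made up.
    m = re.match(_VALID_URL, 'https://www.ustream.tv/recorded/20274954')
    if m:
        print(m.group('videoID'))  # 20274954
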
@@ -3744,43 +3728,37 @@ class YouPornIE(InfoExtractor):
     """Information extractor for youporn.com."""
 
     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
-    IE_NAME = u'youporn'
-    VIDEO_TITLE_RE = r'videoTitleArea">(?P<title>.*)</h1>'
-    VIDEO_DATE_RE = r'Date:</b>(?P<date>.*)</li>'
-    VIDEO_UPLOADER_RE = r'Submitted:</b>(?P<uploader>.*)</li>'
-    DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
-    LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
-
+   
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
 
-    def report_id(self, video_id):
-        """Report finding video ID"""
-        self._downloader.to_screen(u'[youporn] Video ID: %s' % video_id)
+    def report_id(self, video_id):
+        """Report finding video ID"""
+        self._downloader.to_screen(u'[youporn] Video ID: %s' % video_id)
 
-    def report_webpage(self, url):
-        """Report downloading page"""
-        self._downloader.to_screen(u'[youporn] Downloaded page: %s' % url)
+    def report_webpage(self, url):
+        """Report downloading page"""
+        self._downloader.to_screen(u'[youporn] Downloaded page: %s' % url)
 
-    def report_title(self, video_title):
-        """Report dfinding title"""
-        self._downloader.to_screen(u'[youporn] Title: %s' % video_title)
+    def report_title(self, video_title):
+        """Report dfinding title"""
+        self._downloader.to_screen(u'[youporn] Title: %s' % video_title)
     
-    def report_uploader(self, uploader):
-        """Report dfinding title"""
-        self._downloader.to_screen(u'[youporn] Uploader: %s' % uploader)
+    def report_uploader(self, uploader):
+        """Report dfinding title"""
+        self._downloader.to_screen(u'[youporn] Uploader: %s' % uploader)
 
-    def report_upload_date(self, video_date):
-        """Report finding date"""
-        self._downloader.to_screen(u'[youporn] Date: %s' % video_date)
+    def report_upload_date(self, video_date):
+        """Report finding date"""
+        self._downloader.to_screen(u'[youporn] Date: %s' % video_date)
 
     def _print_formats(self, formats):
         """Print all available formats"""
-        print 'Available formats:'
-        print u'ext\t\tformat'
-        print u'---------------------------------'
+        print('Available formats:')
+        print(u'ext\t\tformat')
+        print(u'---------------------------------')
         for format in formats:
-            print u'%s\t\t%s'  % (format['ext'], format['format'])
+            print(u'%s\t\t%s'  % (format['ext'], format['format']))
 
     def _specific(self, req_format, formats):
         for x in formats:
@@ -3788,58 +3766,57 @@ class YouPornIE(InfoExtractor):
                 return x
         return None
 
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
             return
 
-        video_id = mobj.group('videoid').decode('utf-8')
-        self.report_id(video_id)
+        video_id = mobj.group('videoid')
+        #self.report_id(video_id)        
 
-        # Get webpage content
-        try:
-            webpage = urllib2.urlopen(url).read()
-        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
-            return
-        self.report_webpage(url)
+        webpage = self._download_webpage(url, video_id)
+        #self.report_webpage(url)
 
         # Get the video title
-        result = re.search(self.VIDEO_TITLE_RE, webpage)
+        VIDEO_TITLE_RE = r'videoTitleArea">(?P<title>.*)</h1>'
+        result = re.search(VIDEO_TITLE_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract video title')
             return
-        video_title = result.group('title').decode('utf-8').strip()
-        self.report_title(video_title)
+        video_title = result.group('title').strip()
+        #self.report_title(video_title)
 
         # Get the video date
-        result = re.search(self.VIDEO_DATE_RE, webpage)
+        VIDEO_DATE_RE = r'Date:</b>(?P<date>.*)</li>'
+        result = re.search(VIDEO_DATE_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract video date')
             return
-        upload_date = result.group('date').decode('utf-8').strip()
-        self.report_upload_date(upload_date)
+        upload_date = result.group('date').strip()
+        #self.report_upload_date(upload_date)
 
         # Get the video uploader
-        result = re.search(self.VIDEO_UPLOADER_RE, webpage)
+        VIDEO_UPLOADER_RE = r'Submitted:</b>(?P<uploader>.*)</li>'
+        result = re.search(VIDEO_UPLOADER_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract uploader')
             return
-        video_uploader = result.group('uploader').decode('utf-8').strip()
+        video_uploader = result.group('uploader').strip()
         video_uploader = clean_html( video_uploader )
-        self.report_uploader(video_uploader)
+        #self.report_uploader(video_uploader)
 
         # Get all of the formats available
-        result = re.search(self.DOWNLOAD_LIST_RE, webpage)
+        DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
+        result = re.search(DOWNLOAD_LIST_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract download list')
             return
-        download_list_html = result.group('download_list').decode('utf-8').strip()
+        download_list_html = result.group('download_list').strip()
 
         # Get all of the links from the page
-        links = re.findall(self.LINK_RE, download_list_html)
+        LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
+        links = re.findall(LINK_RE, download_list_html)
         if(len(links) == 0):
             self._downloader.trouble(u'ERROR: no known formats available for video')
             return
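
Since the class-level patterns were inlined, the extraction flow is easiest to see as a standalone sketch: the same regexes run over a fabricated page fragment (the HTML below is invented, not a real response):

    import re

    # Fabricated page fragment containing the markup the patterns above expect.
    webpage = '''
    <h1 class="videoTitleArea">Some Title</h1>
    <li><b>Date:</b> November 13, 2012</li>
    <li><b>Submitted:</b> someuser</li>
    <ul class="downloadList">
      <a href="http://cdn.example.invalid/201210/31/123/480p_370k_123/clip.mp4?sig=abc">480p</a>
    </ul>
    '''

    VIDEO_TITLE_RE = r'videoTitleArea">(?P<title>.*)</h1>'
    VIDEO_DATE_RE = r'Date:</b>(?P<date>.*)</li>'
    VIDEO_UPLOADER_RE = r'Submitted:</b>(?P<uploader>.*)</li>'
    DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
    LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'

    video_title = re.search(VIDEO_TITLE_RE, webpage).group('title').strip()
    upload_date = re.search(VIDEO_DATE_RE, webpage).group('date').strip()
    video_uploader = re.search(VIDEO_UPLOADER_RE, webpage).group('uploader').strip()
    download_list_html = re.search(DOWNLOAD_LIST_RE, webpage).group('download_list')
    links = re.findall(LINK_RE, download_list_html)
    print(video_title, upload_date, video_uploader, links)
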
@@ -3853,8 +3830,8 @@ class YouPornIE(InfoExtractor):
             # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0
             # A path looks like this:
             # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4
-            video_url = unescapeHTML( link.decode('utf-8') )
-            path = urlparse( video_url ).path
+            video_url = unescapeHTML( link )
+            path = compat_urllib_parse_urlparse( video_url ).path
             extension = os.path.splitext( path )[1][1:]
             format = path.split('/')[4].split('_')[:2]
             size = format[0]
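
A worked example of the path parsing above, using the sample URL from the comment: the fifth path component carries the resolution and bitrate, and the extension comes from the file name. urllib.parse.urlparse stands in for compat_urllib_parse_urlparse:

    import os
    from urllib.parse import urlparse  # stands in for compat_urllib_parse_urlparse

    # The sample URL from the comment above.
    video_url = ('http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/'
                 'YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4'
                 '?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0')

    path = urlparse(video_url).path
    extension = os.path.splitext(path)[1][1:]   # 'mp4'
    format = path.split('/')[4].split('_')[:2]  # ['480p', '370k'] -> resolution, bitrate
    size = format[0]
    print(extension, size, format)
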
@@ -3903,29 +3880,25 @@ class PornotubeIE(InfoExtractor):
     """Information extractor for pornotube.com."""
 
     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
-    IE_NAME = u'pornotube'
-    VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
-    VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
-
 
-    def __init__(self, downloader=None):
-        InfoExtractor.__init__(self, downloader)
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
 
-    def report_extract_entry(self, url):
-        """Report downloading extry"""
-        self._downloader.to_screen(u'[pornotube] Downloading entry: %s' % url.decode('utf-8'))
+    def report_extract_entry(self, url):
+        """Report downloading extry"""
+        self._downloader.to_screen(u'[pornotube] Downloading entry: %s' % url.decode('utf-8'))
 
-    def report_date(self, upload_date):
-        """Report finding uploaded date"""
-        self._downloader.to_screen(u'[pornotube] Entry date: %s' % upload_date)
+    def report_date(self, upload_date):
+        """Report finding uploaded date"""
+        self._downloader.to_screen(u'[pornotube] Entry date: %s' % upload_date)
 
-    def report_webpage(self, url):
-        """Report downloading page"""
-        self._downloader.to_screen(u'[pornotube] Downloaded page: %s' % url)
+    def report_webpage(self, url):
+        """Report downloading page"""
+        self._downloader.to_screen(u'[pornotube] Downloaded page: %s' % url)
 
-    def report_title(self, video_title):
-        """Report downloading extry"""
-        self._downloader.to_screen(u'[pornotube] Title: %s' % video_title.decode('utf-8'))
+    def report_title(self, video_title):
+        """Report downloading extry"""
+        self._downloader.to_screen(u'[pornotube] Title: %s' % video_title.decode('utf-8'))
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -3933,34 +3906,31 @@ class PornotubeIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
             return
 
-        video_id = mobj.group('videoid').decode('utf-8')
-        video_title = mobj.group('title').decode('utf-8')
-        self.report_title(video_title);
+        video_id = mobj.group('videoid')
+        video_title = mobj.group('title')
+        #self.report_title(video_title);
 
         # Get webpage content
-        try:
-            webpage = urllib2.urlopen(url).read()
-        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
-            return
-        self.report_webpage(url)
+        webpage = self._download_webpage(url, video_id)
+        #self.report_webpage(url)
 
         # Get the video URL
-        result = re.search(self.VIDEO_URL_RE, webpage)
+        VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
+        result = re.search(VIDEO_URL_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract video url')
             return
-        video_url = urllib.unquote(result.group('url').decode('utf-8'))
-        self.report_extract_entry(video_url)
+        video_url = compat_urllib_parse.unquote(result.group('url'))
+        #self.report_extract_entry(video_url)
 
         #Get the uploaded date
-        result = re.search(self.VIDEO_UPLOADED_RE, webpage)
+        VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
+        result = re.search(VIDEO_UPLOADED_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract video title')
             return
-        upload_date = result.group('date').decode('utf-8')
-        self.report_date(upload_date);
-
+        upload_date = result.group('date')
+        #self.report_date(upload_date);
 
         info = {'id': video_id,
                 'url': video_url,
@@ -3980,71 +3950,69 @@ class PornotubeIE(InfoExtractor):
 class YouJizzIE(InfoExtractor):
     """Information extractor for youjizz.com."""
 
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/([^.]+).html$'
-    IE_NAME = u'youjizz'
-    VIDEO_TITLE_RE = r'<title>(?P<title>.*)</title>'
-    EMBED_PAGE_RE = r'http://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)'
-    SOURCE_RE = r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);'
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+).html$'
 
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
 
-    def report_extract_entry(self, url):
-        """Report downloading extry"""
-        self._downloader.to_screen(u'[youjizz] Downloading entry: %s' % url.decode('utf-8'))
+    def report_extract_entry(self, url):
+        """Report downloading extry"""
+        self._downloader.to_screen(u'[youjizz] Downloading entry: %s' % url.decode('utf-8'))
 
-    def report_webpage(self, url):
-        """Report downloading page"""
-        self._downloader.to_screen(u'[youjizz] Downloaded page: %s' % url)
+    def report_webpage(self, url):
+        """Report downloading page"""
+        self._downloader.to_screen(u'[youjizz] Downloaded page: %s' % url)
 
-    def report_title(self, video_title):
-        """Report downloading extry"""
-        self._downloader.to_screen(u'[youjizz] Title: %s' % video_title.decode('utf-8'))
+    def report_title(self, video_title):
+        """Report downloading extry"""
+        self._downloader.to_screen(u'[youjizz] Title: %s' % video_title.decode('utf-8'))
 
-    def report_embed_page(self, embed_page):
-        """Report downloading extry"""
-        self._downloader.to_screen(u'[youjizz] Embed Page: %s' % embed_page.decode('utf-8'))
+    def report_embed_page(self, embed_page):
+        """Report downloading extry"""
+        self._downloader.to_screen(u'[youjizz] Embed Page: %s' % embed_page.decode('utf-8'))
 
     def _real_extract(self, url):
-        # Get webpage content
-        try:
-            webpage = urllib2.urlopen(url).read()
-        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
             return
-        self.report_webpage(url)
+
+        video_id = mobj.group('videoid')
+
+        # Get webpage content
+        webpage = self._download_webpage(url, video_id)
+        #self.report_webpage(url)
 
         # Get the video title
-        result = re.search(self.VIDEO_TITLE_RE, webpage)
+        VIDEO_TITLE_RE = r'<title>(?P<title>.*)</title>'
+        result = re.search(VIDEO_TITLE_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract video title')
             return
-        video_title = result.group('title').decode('utf-8').strip()
-        self.report_title(video_title)
+        video_title = result.group('title').strip()
+        #self.report_title(video_title)
 
         # Get the embed page
-        result = re.search(self.EMBED_PAGE_RE, webpage)
+        EMBED_PAGE_RE = r'http://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)'
+        result = re.search(EMBED_PAGE_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract embed page')
             return
 
-        embed_page_url = result.group(0).decode('utf-8').strip()
-        video_id = result.group('videoid').decode('utf-8')
-        self.report_embed_page(embed_page_url)
+        embed_page_url = result.group(0).strip()
+        video_id = result.group('videoid')
+        #self.report_embed_page(embed_page_url)
     
-        try:
-            webpage = urllib2.urlopen(embed_page_url).read()
-        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video embed page: %s' % err)
-            return
-        
+        webpage = self._download_webpage(embed_page_url, video_id)
+
         # Get the video URL
-        result = re.search(self.SOURCE_RE, webpage)
+        SOURCE_RE = r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);'
+        result = re.search(SOURCE_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract video url')
             return
-        video_url = result.group('source').decode('utf-8')
-        self.report_extract_entry(video_url)
+        video_url = result.group('source')
+        #self.report_extract_entry(video_url)
 
         info = {'id': video_id,
                 'url': video_url,