Minor documentation change
diff --git a/youtube-dl b/youtube-dl
index cedbf5977667d2100fe27184a222f00908be8f4c..496ae036fa7c98ed3166e0dfb16e857f4efad409 100755
@@ -65,9 +65,10 @@ class FileDownloader(object):
        For this, file downloader objects have a method that allows
        InfoExtractors to be registered in a given order. When it is passed
        a URL, the file downloader handles it to the first InfoExtractor it
-       finds that reports being able to handle it. The InfoExtractor returns
-       all the information to the FileDownloader and the latter downloads the
-       file or does whatever it's instructed to do.
+       finds that reports being able to handle it. The InfoExtractor extracts
+       all the information about the video or videos the URL refers to, and
+       asks the FileDownloader to process the video information, possibly
+       downloading the video.
 
        File downloaders accept a lot of parameters. In order not to saturate
        the object constructor with arguments, it receives a dictionary of
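
A minimal sketch of the hand-off described in the docstring above: a FileDownloader is built from a parameter dictionary, InfoExtractors are registered with it, and download() routes each URL to the first extractor that accepts it. The parameter keys and the output template appear in this diff; the add_info_extractor name, the exact download() call signature, the YoutubeIE() construction and the example URL are assumptions, not shown here.

    # Illustrative only: build a downloader, register an extractor, download.
    fd = FileDownloader({
            'quiet': False,
            'simulate': False,
            'outtmpl': u'%(stitle)s-%(id)s.%(ext)s',
            })
    youtube_ie = YoutubeIE()
    fd.add_info_extractor(youtube_ie)   # assumed registration method name
    fd.download(['http://www.youtube.com/watch?v=abcdefghijk'])
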
@@ -189,7 +190,7 @@ class FileDownloader(object):
        def to_stdout(self, message, skip_eol=False):
                """Print message to stdout if not in quiet mode."""
                if not self.params.get('quiet', False):
-                       print u'%s%s' % (message, [u'\n', u''][skip_eol]),
+                       print (u'%s%s' % (message, [u'\n', u''][skip_eol])).encode(locale.getpreferredencoding()),
                        sys.stdout.flush()
        
        def to_stderr(self, message):
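
The encode() added to to_stdout() matters because, under the Python 2 this script runs on, printing a unicode object makes the interpreter encode it implicitly, and that raises UnicodeEncodeError whenever stdout's encoding cannot represent the characters (a common case when output is piped). A small standalone illustration of the same pattern, not taken from the script:

    import locale

    title = u'V\u00eddeo de ejemplo'
    # Implicit conversion ("print title") can fail on terminals or pipes whose
    # encoding cannot represent these characters. Encoding explicitly with the
    # locale's preferred encoding mirrors the change above; it still assumes
    # that encoding can actually represent the text.
    print title.encode(locale.getpreferredencoding())
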
@@ -243,9 +244,9 @@ class FileDownloader(object):
                """Process a single dictionary returned by an InfoExtractor."""
                # Forced printings
                if self.params.get('forcetitle', False):
-                       print info_dict['title']
+                       print info_dict['title'].encode(locale.getpreferredencoding())
                if self.params.get('forceurl', False):
-                       print info_dict['url']
+                       print info_dict['url'].encode(locale.getpreferredencoding())
                        
                # Do nothing else if in simulate mode
                if self.params.get('simulate', False):
@@ -301,21 +302,8 @@ class FileDownloader(object):
                                # Suitable InfoExtractor found
                                suitable_found = True
 
-                               # Extract information from URL
-                               all_results = ie.extract(url)
-                               results = [x for x in all_results if x is not None]
-
-                               # See if there were problems extracting any information
-                               if len(results) != len(all_results):
-                                       self.trouble()
-
-                               # Two results could go to the same file
-                               if len(results) > 1 and self.fixed_template():
-                                       raise SameFileError(self.params['outtmpl'])
-
-                               # Process each result
-                               for result in results:
-                                       self.process_info(result)
+                               # Extract information from URL and process it
+                               ie.extract(url)
 
                                # Suitable InfoExtractor had been found; go to next URL
                                break
@@ -373,9 +361,10 @@ class InfoExtractor(object):
        Information extractors are the classes that, given a URL, extract
        information from the video (or videos) the URL refers to. This
        information includes the real video URL, the video title and simplified
-       title, author and others. It is returned in a list of dictionaries when
-       calling its extract() method. It is a list because a URL can refer to
-       more than one video (think of playlists). The dictionaries must include
+       title, author and others. The information is stored in a dictionary
+       which is then passed to the FileDownloader. The FileDownloader
+       processes this information, possibly downloading the video to the file
+       system, among other possible outcomes. The dictionary must include
        the following fields:
 
        id:             Video identifier.
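
Putting the two docstrings together, the extractor side of the contract now looks roughly like the sketch below: _real_extract() builds one dictionary per video and hands it to the FileDownloader with process_info(), reporting failures through trouble() instead of returning [None]. The field names are the ones used later in this diff and the sketch relies on the re module the script already imports; the class, URL pattern and values are invented for illustration.

    class ExampleIE(InfoExtractor):
            """Hypothetical extractor showing the extract-and-process flow."""
            _VALID_URL = r'(?:http://)?(?:www\.)?example\.com/watch/(\w+)'

            def _real_extract(self, url):
                    mobj = re.match(self._VALID_URL, url)
                    if mobj is None:
                            # Report the problem to the FileDownloader and stop.
                            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
                            return
                    # Hand the extracted information straight to the FileDownloader.
                    self._downloader.process_info({
                            'id':           mobj.group(1).decode('utf-8'),
                            'url':          u'http://www.example.com/videos/example.flv',
                            'uploader':     u'uploader_nickname',
                            'title':        u'Example video',
                            'stitle':       u'Example_video',
                            'ext':          u'flv',
                            })
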
@@ -553,15 +542,15 @@ class YoutubeIE(InfoExtractor):
                        self.report_age_confirmation()
                        age_results = urllib2.urlopen(request).read()
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                       self._downloader.to_stderr(u'ERROR: unable to confirm age: %s' % str(err))
+                       self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
                        return
 
        def _real_extract(self, url):
                # Extract video id from URL
                mobj = re.match(self._VALID_URL, url)
                if mobj is None:
-                       self._downloader.to_stderr(u'ERROR: invalid URL: %s' % url)
-                       return [None]
+                       self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+                       return
                video_id = mobj.group(2)
 
                # Downloader parameters
@@ -586,15 +575,15 @@ class YoutubeIE(InfoExtractor):
                        self.report_webpage_download(video_id)
                        video_webpage = urllib2.urlopen(request).read()
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                       self._downloader.to_stderr(u'ERROR: unable to download video webpage: %s' % str(err))
-                       return [None]
+                       self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+                       return
                self.report_information_extraction(video_id)
                
                # "t" param
                mobj = re.search(r', "t": "([^"]+)"', video_webpage)
                if mobj is None:
-                       self._downloader.to_stderr(u'ERROR: unable to extract "t" parameter')
-                       return [None]
+                       self._downloader.trouble(u'ERROR: unable to extract "t" parameter')
+                       return
                video_real_url = 'http://www.youtube.com/get_video?video_id=%s&t=%s&el=detailpage&ps=' % (video_id, mobj.group(1))
                if format_param is not None:
                        video_real_url = '%s&fmt=%s' % (video_real_url, format_param)
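
For a concrete sense of what this hunk builds: with the video id and the "t" token scraped from the watch page, plus an optional format request, the real video URL comes out shaped like the line below (the id, token and fmt value are invented):

    http://www.youtube.com/get_video?video_id=abcdefghijk&t=vjVQa1Ppabcdefgh&el=detailpage&ps=&fmt=18
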
@@ -603,15 +592,15 @@ class YoutubeIE(InfoExtractor):
                # uploader
                mobj = re.search(r"var watchUsername = '([^']+)';", video_webpage)
                if mobj is None:
-                       self._downloader.to_stderr(u'ERROR: unable to extract uploader nickname')
-                       return [None]
+                       self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+                       return
                video_uploader = mobj.group(1)
 
                # title
                mobj = re.search(r'(?im)<title>YouTube - ([^<]*)</title>', video_webpage)
                if mobj is None:
-                       self._downloader.to_stderr(u'ERROR: unable to extract video title')
-                       return [None]
+                       self._downloader.trouble(u'ERROR: unable to extract video title')
+                       return
                video_title = mobj.group(1).decode('utf-8')
                video_title = re.sub(ur'(?u)&(.+?);', self.htmlentity_transform, video_title)
                video_title = video_title.replace(os.sep, u'%')
@@ -621,20 +610,21 @@ class YoutubeIE(InfoExtractor):
                simple_title = simple_title.strip(ur'_')
 
                # Process video information
-               return [{
+               self._downloader.process_info({
                        'id':           video_id.decode('utf-8'),
                        'url':          video_real_url.decode('utf-8'),
                        'uploader':     video_uploader.decode('utf-8'),
                        'title':        video_title,
                        'stitle':       simple_title,
                        'ext':          video_extension.decode('utf-8'),
-                       }]
+                       })
 
 class MetacafeIE(InfoExtractor):
        """Information Extractor for metacafe.com."""
 
        _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
        _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
+       _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
        _youtube_ie = None
 
        def __init__(self, youtube_ie, downloader=None):
@@ -668,7 +658,7 @@ class MetacafeIE(InfoExtractor):
                        self.report_disclaimer()
                        disclaimer = urllib2.urlopen(request).read()
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                       self._downloader.to_stderr(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
+                       self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
                        return
 
                # Confirm age
@@ -676,27 +666,28 @@ class MetacafeIE(InfoExtractor):
                        'filters': '0',
                        'submit': "Continue - I'm over 18",
                        }
-               request = urllib2.Request('http://www.metacafe.com/', urllib.urlencode(disclaimer_form), std_headers)
+               request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form), std_headers)
                try:
                        self.report_age_confirmation()
                        disclaimer = urllib2.urlopen(request).read()
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                       self._downloader.to_stderr(u'ERROR: unable to confirm age: %s' % str(err))
+                       self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
                        return
        
        def _real_extract(self, url):
                # Extract id and simplified title from URL
                mobj = re.match(self._VALID_URL, url)
                if mobj is None:
-                       self._downloader.to_stderr(u'ERROR: invalid URL: %s' % url)
-                       return [None]
+                       self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+                       return
 
                video_id = mobj.group(1)
 
                # Check if video comes from YouTube
                mobj2 = re.match(r'^yt-(.*)$', video_id)
                if mobj2 is not None:
-                       return self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
+                       self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
+                       return
 
                simple_title = mobj.group(2).decode('utf-8')
                video_extension = 'flv'
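
Two behaviours in the Metacafe hunk above are easy to miss. First, the age confirmation form is now POSTed to the dedicated family-filter endpoint (_FILTER_POST) instead of the site root. Second, ids of the form "yt-<YouTube id>" mean the video actually lives on YouTube, so the work is delegated to the YouTube extractor, which then drives process_info() itself. A sketch of that delegation with an invented id and slug:

    # Given a URL such as http://www.metacafe.com/watch/yt-abcdefghijk/some_title/
    # _VALID_URL captures the id 'yt-abcdefghijk'; the yt- prefix triggers:
    self._youtube_ie.extract('http://www.youtube.com/watch?v=abcdefghijk')
    return
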
@@ -707,46 +698,46 @@ class MetacafeIE(InfoExtractor):
                        self.report_download_webpage(video_id)
                        webpage = urllib2.urlopen(request).read()
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                       self._downloader.to_stderr(u'ERROR: unable retrieve video webpage: %s' % str(err))
-                       return [None]
+                       self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
+                       return
 
                # Extract URL, uploader and title from webpage
                self.report_extraction(video_id)
-               mobj = re.search(r'(?m)"mediaURL":"(http.*?\.flv)"', webpage)
+               mobj = re.search(r'(?m)&mediaURL=(http.*?\.flv)', webpage)
                if mobj is None:
-                       self._downloader.to_stderr(u'ERROR: unable to extract media URL')
-                       return [None]
-               mediaURL = mobj.group(1).replace('\\', '')
+                       self._downloader.trouble(u'ERROR: unable to extract media URL')
+                       return
+               mediaURL = urllib.unquote(mobj.group(1))
 
-               mobj = re.search(r'(?m)"gdaKey":"(.*?)"', webpage)
+               mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
                if mobj is None:
-                       self._downloader.to_stderr(u'ERROR: unable to extract gdaKey')
-                       return [None]
+                       self._downloader.trouble(u'ERROR: unable to extract gdaKey')
+                       return
                gdaKey = mobj.group(1)
 
                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
 
                mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
                if mobj is None:
-                       self._downloader.to_stderr(u'ERROR: unable to extract title')
-                       return [None]
+                       self._downloader.trouble(u'ERROR: unable to extract title')
+                       return
                video_title = mobj.group(1).decode('utf-8')
 
-               mobj = re.search(r'(?m)<li id="ChnlUsr">.*?Submitter:<br />(.*?)</li>', webpage)
+               mobj = re.search(r'(?ms)<li id="ChnlUsr">.*?Submitter:.*?<a .*?>(.*?)<', webpage)
                if mobj is None:
-                       self._downloader.to_stderr(u'ERROR: unable to extract uploader nickname')
-                       return [None]
-               video_uploader = re.sub(r'<.*?>', '', mobj.group(1))
+                       self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+                       return
+               video_uploader = mobj.group(1)
 
-               # Return information
-               return [{
+               # Process video information
+               self._downloader.process_info({
                        'id':           video_id.decode('utf-8'),
                        'url':          video_url.decode('utf-8'),
                        'uploader':     video_uploader.decode('utf-8'),
                        'title':        video_title,
                        'stitle':       simple_title,
                        'ext':          video_extension.decode('utf-8'),
-                       }]
+                       })
 
 
 class YoutubeSearchIE(InfoExtractor):
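
In the extraction hunk above, the Metacafe page format has changed: mediaURL and gdaKey no longer appear as "key":"value" pairs but as &key=value parameters, so the new regexes read them from the parameter string and mediaURL is percent-decoded with urllib.unquote() instead of having backslashes stripped. A worked example with invented values:

    import urllib

    # Fragment of the watch page (invented):
    #   ...&mediaURL=http%3A%2F%2Fmedia.example.com%2F12345.flv&gdaKey=abc123def456&...
    mediaURL = urllib.unquote('http%3A%2F%2Fmedia.example.com%2F12345.flv')
    gdaKey = 'abc123def456'
    video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
    # video_url == 'http://media.example.com/12345.flv?__gda__=abc123def456'
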
@@ -776,27 +767,31 @@ class YoutubeSearchIE(InfoExtractor):
        def _real_extract(self, query):
                mobj = re.match(self._VALID_QUERY, query)
                if mobj is None:
-                       self._downloader.to_stderr(u'ERROR: invalid search query "%s"' % query)
-                       return [None]
+                       self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+                       return
 
                prefix, query = query.split(':')
                prefix = prefix[8:]
                if prefix == '':
-                       return self._download_n_results(query, 1)
+                       self._download_n_results(query, 1)
+                       return
                elif prefix == 'all':
-                       return self._download_n_results(query, self._max_youtube_results)
+                       self._download_n_results(query, self._max_youtube_results)
+                       return
                else:
                        try:
                                n = int(prefix)
                                if n <= 0:
-                                       self._downloader.to_stderr(u'ERROR: invalid download number %s for query "%s"' % (n, query))
-                                       return [None]
+                                       self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                                       return
                                elif n > self._max_youtube_results:
                                        self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)'  % (self._max_youtube_results, n))
                                        n = self._max_youtube_results
-                               return self._download_n_results(query, n)
+                               self._download_n_results(query, n)
+                               return
                        except ValueError: # parsing prefix as int fails
-                               return self._download_n_results(query, 1)
+                               self._download_n_results(query, 1)
+                               return
 
        def _download_n_results(self, query, n):
                """Downloads a specified number of results for a query"""
@@ -812,8 +807,8 @@ class YoutubeSearchIE(InfoExtractor):
                        try:
                                page = urllib2.urlopen(request).read()
                        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                               self._downloader.to_stderr(u'ERROR: unable to download webpage: %s' % str(err))
-                               return [None]
+                               self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+                               return
 
                        # Extract video identifiers
                        for mobj in re.finditer(self._VIDEO_INDICATOR, page):
@@ -823,16 +818,14 @@ class YoutubeSearchIE(InfoExtractor):
                                        already_seen.add(video_id)
                                        if len(video_ids) == n:
                                                # Specified n videos reached
-                                               information = []
                                                for id in video_ids:
-                                                       information.extend(self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id))
-                                               return information
+                                                       self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
+                                               return
 
                        if self._MORE_PAGES_INDICATOR not in page:
-                               information = []
                                for id in video_ids:
-                                       information.extend(self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id))
-                               return information
+                                       self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
+                               return
 
                        pagenum = pagenum + 1
 
@@ -864,8 +857,8 @@ class YoutubePlaylistIE(InfoExtractor):
                # Extract playlist id
                mobj = re.match(self._VALID_URL, url)
                if mobj is None:
-                       self._downloader.to_stderr(u'ERROR: invalid url: %s' % url)
-                       return [None]
+                       self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+                       return
 
                # Download playlist pages
                playlist_id = mobj.group(1)
@@ -878,8 +871,8 @@ class YoutubePlaylistIE(InfoExtractor):
                        try:
                                page = urllib2.urlopen(request).read()
                        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                               self._downloader.to_stderr(u'ERROR: unable to download webpage: %s' % str(err))
-                               return [None]
+                               self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+                               return
 
                        # Extract video identifiers
                        ids_in_page = []
@@ -892,10 +885,9 @@ class YoutubePlaylistIE(InfoExtractor):
                                break
                        pagenum = pagenum + 1
 
-               information = []
                for id in video_ids:
-                       information.extend(self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id))
-               return information
+                       self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
+               return
 
 class PostProcessor(object):
        """Post Processor class.
@@ -927,7 +919,7 @@ class PostProcessor(object):
                """Run the PostProcessor.
 
                The "information" argument is a dictionary like the ones
-               returned by InfoExtractors. The only difference is that this
+               composed by InfoExtractors. The only difference is that this
                one has an extra field called "filepath" that points to the
                downloaded file.
 
@@ -1005,7 +997,9 @@ if __name__ == '__main__':
                batchurls = []
                if opts.batchfile is not None:
                        try:
-                               batchurls = [line.strip() for line in open(opts.batchfile, 'r')]
+                               batchurls = open(opts.batchfile, 'r').readlines()
+                               batchurls = [x.strip() for x in batchurls]
+                               batchurls = [x for x in batchurls if len(x) > 0]
                        except IOError:
                                sys.exit(u'ERROR: batch file could not be read')
                all_urls = batchurls + args
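
The rewritten batch-file handling strips surrounding whitespace and drops blank lines, so stray empty lines in a batch file are no longer passed on as URLs. A small illustration of the same three steps on invented input:

    lines = ['http://www.youtube.com/watch?v=abcdefghijk\n',
             '   \n',
             'http://www.metacafe.com/watch/12345/some_title/\n',
             '\n']
    batchurls = [x.strip() for x in lines]
    batchurls = [x for x in batchurls if len(x) > 0]
    # batchurls == ['http://www.youtube.com/watch?v=abcdefghijk',
    #               'http://www.metacafe.com/watch/12345/some_title/']
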
@@ -1036,9 +1030,6 @@ if __name__ == '__main__':
                youtube_search_ie = YoutubeSearchIE(youtube_ie)
 
                # File downloader
-               charset = locale.getpreferredencoding()
-               if charset is None:
-                       charset = 'ascii'
                fd = FileDownloader({
                        'usenetrc': opts.usenetrc,
                        'username': opts.username,
@@ -1048,7 +1039,7 @@ if __name__ == '__main__':
                        'forcetitle': opts.gettitle,
                        'simulate': (opts.simulate or opts.geturl or opts.gettitle),
                        'format': opts.format,
-                       'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(charset))
+                       'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(locale.getpreferredencoding()))
                                or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
                                or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
                                or u'%(id)s.%(ext)s'),
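
The outtmpl chain above picks the first template that applies: an explicit opts.outtmpl (now decoded with the locale's preferred encoding instead of the removed charset fallback), then the simplified-title template when opts.usetitle is set, the literal-title template when opts.useliteral is set, and finally the bare id. With the invented values id='abcdefghijk', stitle=u'Example_video', title=u'Example video' and ext='flv', the three built-in templates expand to:

    u'%(stitle)s-%(id)s.%(ext)s'  ->  Example_video-abcdefghijk.flv
    u'%(title)s-%(id)s.%(ext)s'   ->  Example video-abcdefghijk.flv
    u'%(id)s.%(ext)s'             ->  abcdefghijk.flv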