#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Ricardo Garcia Gonzalez
+# Author: Danny Colligan
# License: Public domain code
import htmlentitydefs
import httplib
+import locale
import math
import netrc
import os
import urllib
import urllib2
-std_headers = {
- 'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.1) Gecko/2008070208 Firefox/3.0.1',
+std_headers = {
+ 'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.2) Gecko/20090729 Firefox/3.5.2',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
'Accept-Language': 'en-us,en;q=0.5',
"""
pass
+class PostProcessingError(Exception):
+ """Post Processing exception.
+
+ This exception may be raised by PostProcessor's .run() method to
+ indicate an error in the postprocessing task.
+ """
+ pass
+
+class UnavailableFormatError(Exception):
+ """Unavailable Format exception.
+
+ This exception will be thrown when a video is requested
+ in a format that is not available for that video.
+ """
+ pass
+
+class ContentTooShortError(Exception):
+ """Content Too Short exception.
+
+ This exception may be raised by FileDownloader objects when a file they
+ download is smaller than what the server first announced, indicating
+ the connection was probably interrupted.
+ """
+ # Both in bytes
+ downloaded = None
+ expected = None
+
+ def __init__(self, downloaded, expected):
+ self.downloaded = downloaded
+ self.expected = expected
+
class FileDownloader(object):
"""File Downloader class.
For this, file downloader objects have a method that allows
InfoExtractors to be registered in a given order. When it is passed
a URL, the file downloader hands it to the first InfoExtractor it
- finds that reports being able to handle it. The InfoExtractor returns
- all the information to the FileDownloader and the latter downloads the
- file or does whatever it's instructed to do.
+ finds that reports being able to handle it. The InfoExtractor extracts
+ all the information about the video or videos the URL refers to, and
+ asks the FileDownloader to process the video information, possibly
+ downloading the video.
File downloaders accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
- options instead. These options are available through the get_params()
- method for the InfoExtractors to use. The FileDownloader also registers
- itself as the downloader in charge for the InfoExtractors that are
- added to it, so this is a "mutual registration".
+ options instead. These options are available through the params
+ attribute for the InfoExtractors to use. The FileDownloader also
+ registers itself as the downloader in charge for the InfoExtractors
+ that are added to it, so this is a "mutual registration".
Available options:
outtmpl: Template for output names.
ignoreerrors: Do not stop on download errors.
ratelimit: Download speed limit, in bytes/sec.
+ nooverwrites: Prevent overwriting files.
+ continuedl: Try to continue downloads if possible.
"""
- _params = None
+ params = None
_ies = []
+ _pps = []
+ _download_retcode = None
def __init__(self, params):
"""Create a FileDownloader object with the given options."""
self._ies = []
- self.set_params(params)
+ self._pps = []
+ self._download_retcode = 0
+ self.params = params
@staticmethod
def pmkdir(filename):
def format_bytes(bytes):
if bytes is None:
return 'N/A'
- if bytes == 0:
+ if type(bytes) is str:
+ bytes = float(bytes)
+ if bytes == 0.0:
exponent = 0
else:
- exponent = long(math.log(float(bytes), 1024.0))
+ exponent = long(math.log(bytes, 1024.0))
suffix = 'bkMGTPEZY'[exponent]
converted = float(bytes) / float(1024**exponent)
return '%.2f%s' % (converted, suffix)
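# For instance, format_bytes(1536) yields '1.50k' and format_bytes(0) yields '0.00b'.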
return '--:--'
return '%02d:%02d' % (eta_mins, eta_secs)
- @staticmethod
+ @staticmethod
def calc_speed(start, now, bytes):
dif = now - start
if bytes == 0 or dif < 0.001: # One millisecond
new_min = max(bytes / 2.0, 1.0)
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
if elapsed_time < 0.001:
- return int(new_max)
+ return long(new_max)
rate = bytes / elapsed_time
if rate > new_max:
- return int(new_max)
+ return long(new_max)
if rate < new_min:
- return int(new_min)
- return int(rate)
+ return long(new_min)
+ return long(rate)
@staticmethod
def parse_bytes(bytestr):
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
return long(round(number * multiplier))
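# For instance, parse_bytes('50k') yields 51200 and parse_bytes('44.6m') yields 46766490.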
- def set_params(self, params):
- """Sets parameters."""
- if type(params) != dict:
- raise ValueError('params: dictionary expected')
- self._params = params
-
- def get_params(self):
- """Get parameters."""
- return self._params
+ @staticmethod
+ def verify_url(url):
+ """Verify a URL is valid and data could be downloaded."""
+ request = urllib2.Request(url, None, std_headers)
+ data = urllib2.urlopen(request)
+ data.read(1)
+ data.close()
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
ie.set_downloader(self)
+ def add_post_processor(self, pp):
+ """Add a PostProcessor object to the end of the chain."""
+ self._pps.append(pp)
+ pp.set_downloader(self)
+
def to_stdout(self, message, skip_eol=False):
"""Print message to stdout if not in quiet mode."""
- if not self._params.get('quiet', False):
- if skip_eol:
- print message,
- else:
- print message
+ if not self.params.get('quiet', False):
+ print (u'%s%s' % (message, [u'\n', u''][skip_eol])).encode(locale.getpreferredencoding()),
sys.stdout.flush()
def to_stderr(self, message):
"""Print message to stderr."""
- print >>sys.stderr, message
+ print >>sys.stderr, message.encode(locale.getpreferredencoding())
def fixed_template(self):
"""Checks if the output template is fixed."""
- return (re.search(ur'(?u)%\(.+?\)s', self._params['outtmpl']) is None)
+ return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None)
def trouble(self, message=None):
"""Determine action to take when a download problem appears.
Depending on if the downloader has been configured to ignore
download errors or not, this method may throw an exception or
- not when errors are found, after printing the message. If it
- doesn't raise, it returns an error code suitable to be returned
- later as a program exit code to indicate error.
+ not when errors are found, after printing the message.
"""
if message is not None:
self.to_stderr(message)
- if not self._params.get('ignoreerrors', False):
+ if not self.params.get('ignoreerrors', False):
raise DownloadError(message)
- return 1
+ self._download_retcode = 1
def slow_down(self, start_time, byte_counter):
"""Sleep if the download speed is over the rate limit."""
- rate_limit = self._params.get('ratelimit', None)
+ rate_limit = self.params.get('ratelimit', None)
if rate_limit is None or byte_counter == 0:
return
now = time.time()
"""Report download progress."""
self.to_stdout(u'\r[download] %s of %s at %s ETA %s' %
(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
+
+ def report_resuming_byte(self, resume_len):
+ """Report attempt to resume at given byte."""
+ self.to_stdout(u'[download] Resuming download at byte %s' % resume_len)
+
+ def report_file_already_downloaded(self, file_name):
+ """Report file has already been fully downloaded."""
+ self.to_stdout(u'[download] %s has already been downloaded' % file_name)
+
+ def report_unable_to_resume(self):
+ """Report it was impossible to resume download."""
+ self.to_stdout(u'[download] Unable to resume')
def report_finish(self):
"""Report download finished."""
self.to_stdout(u'')
+ def process_info(self, info_dict):
+ """Process a single dictionary returned by an InfoExtractor."""
+ # Do nothing else if in simulate mode
+ if self.params.get('simulate', False):
+ try:
+ self.verify_url(info_dict['url'])
+ except (OSError, IOError, urllib2.URLError, httplib.HTTPException, socket.error), err:
+ raise UnavailableFormatError
+
+ # Forced printings
+ if self.params.get('forcetitle', False):
+ print info_dict['title'].encode(locale.getpreferredencoding())
+ if self.params.get('forceurl', False):
+ print info_dict['url'].encode(locale.getpreferredencoding())
+
+ return
+
+ try:
+ template_dict = dict(info_dict)
+ template_dict['epoch'] = unicode(long(time.time()))
+ filename = self.params['outtmpl'] % template_dict
+ except (ValueError, KeyError), err:
+ self.trouble('ERROR: invalid output template or system charset: %s' % str(err))
+ if self.params['nooverwrites'] and os.path.exists(filename):
+ self.to_stderr(u'WARNING: file exists: %s; skipping' % filename)
+ return
+
+ try:
+ self.pmkdir(filename)
+ except (OSError, IOError), err:
+ self.trouble('ERROR: unable to create directories: %s' % str(err))
+ return
+
+ try:
+ success = self._do_download(filename, info_dict['url'])
+ except (OSError, IOError), err:
+ raise UnavailableFormatError
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self.trouble('ERROR: unable to download video data: %s' % str(err))
+ return
+ except (ContentTooShortError, ), err:
+ self.trouble('ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
+ return
+
+ if success:
+ try:
+ self.post_process(filename, info_dict)
+ except (PostProcessingError), err:
+ self.trouble('ERROR: postprocessing: %s' % str(err))
+ return
+
def download(self, url_list):
"""Download a given list of URLs."""
- retcode = 0
if len(url_list) > 1 and self.fixed_template():
- raise SameFileError(self._params['outtmpl'])
+ raise SameFileError(self.params['outtmpl'])
for url in url_list:
suitable_found = False
for ie in self._ies:
+ # Go to next InfoExtractor if not suitable
if not ie.suitable(url):
continue
+
# Suitable InfoExtractor found
suitable_found = True
- all_results = ie.extract(url)
- results = [x for x in all_results if x is not None]
- if len(results) != len(all_results):
- retcode = self.trouble()
-
- if len(results) > 1 and self.fixed_template():
- raise SameFileError(self._params['outtmpl'])
-
- for result in results:
- # Forced printings
- if self._params.get('forcetitle', False):
- print result['title']
- if self._params.get('forceurl', False):
- print result['url']
-
- # Do nothing else if in simulate mode
- if self._params.get('simulate', False):
- continue
- try:
- filename = self._params['outtmpl'] % result
- self.report_destination(filename)
- except (ValueError, KeyError), err:
- retcode = self.trouble('ERROR: invalid output template: %s' % str(err))
- continue
- try:
- self.pmkdir(filename)
- except (OSError, IOError), err:
- retcode = self.trouble('ERROR: unable to create directories: %s' % str(err))
- continue
- try:
- outstream = open(filename, 'wb')
- except (OSError, IOError), err:
- retcode = self.trouble('ERROR: unable to open for writing: %s' % str(err))
- continue
- try:
- self._do_download(outstream, result['url'])
- outstream.close()
- except (OSError, IOError), err:
- retcode = self.trouble('ERROR: unable to write video data: %s' % str(err))
- continue
- except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- retcode = self.trouble('ERROR: unable to download video data: %s' % str(err))
- continue
+ # Extract information from URL and process it
+ ie.extract(url)
+
+ # Suitable InfoExtractor was found; go to next URL
break
+
if not suitable_found:
- retcode = self.trouble('ERROR: no suitable InfoExtractor: %s' % url)
+ self.trouble('ERROR: no suitable InfoExtractor: %s' % url)
+
+ return self._download_retcode
- return retcode
+ def post_process(self, filename, ie_info):
+ """Run the postprocessing chain on the given file."""
+ info = dict(ie_info)
+ info['filepath'] = filename
+ for pp in self._pps:
+ info = pp.run(info)
+ if info is None:
+ break
- def _do_download(self, stream, url):
+ def _do_download(self, filename, url):
+ stream = None
+ open_mode = 'ab'
+
+ basic_request = urllib2.Request(url, None, std_headers)
request = urllib2.Request(url, None, std_headers)
- data = urllib2.urlopen(request)
+
+ # Attempt to resume download with "continuedl" option
+ if os.path.isfile(filename):
+ resume_len = os.path.getsize(filename)
+ else:
+ resume_len = 0
+ if self.params['continuedl'] and resume_len != 0:
+ self.report_resuming_byte(resume_len)
+ request.add_header('Range','bytes=%d-' % resume_len)
+
+ # Establish connection
+ try:
+ data = urllib2.urlopen(request)
+ except (urllib2.HTTPError, ), err:
+ if err.code != 416: # 416 is 'Requested range not satisfiable'
+ raise
+ data = urllib2.urlopen(basic_request)
+ content_length = data.info()['Content-Length']
+ if content_length is not None and long(content_length) == resume_len:
+ self.report_file_already_downloaded(filename)
+ return True
+ else:
+ self.report_unable_to_resume()
+ open_mode = 'wb'
+
data_len = data.info().get('Content-length', None)
data_len_str = self.format_bytes(data_len)
byte_counter = 0
block_size = 1024
start = time.time()
while True:
- # Progress message
- percent_str = self.calc_percent(byte_counter, data_len)
- eta_str = self.calc_eta(start, time.time(), data_len, byte_counter)
- speed_str = self.calc_speed(start, time.time(), byte_counter)
- self.report_progress(percent_str, data_len_str, speed_str, eta_str)
-
# Download and write
before = time.time()
data_block = data.read(block_size)
if data_block_len == 0:
break
byte_counter += data_block_len
+
+ # Open file just in time
+ if stream is None:
+ try:
+ stream = open(filename, open_mode)
+ self.report_destination(filename)
+ except (OSError, IOError), err:
+ self.trouble('ERROR: unable to open for writing: %s' % str(err))
+ return False
stream.write(data_block)
block_size = self.best_block_size(after - before, data_block_len)
+ # Progress message
+ percent_str = self.calc_percent(byte_counter, data_len)
+ eta_str = self.calc_eta(start, time.time(), data_len, byte_counter)
+ speed_str = self.calc_speed(start, time.time(), byte_counter)
+ self.report_progress(percent_str, data_len_str, speed_str, eta_str)
+
# Apply rate limit
self.slow_down(start, byte_counter)
self.report_finish()
if data_len is not None and str(byte_counter) != data_len:
- raise ValueError('Content too short: %s/%s bytes' % (byte_counter, data_len))
+ raise ContentTooShortError(byte_counter, long(data_len))
+ return True
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information from the video (or videos) the URL refers to. This
information includes the real video URL, the video title and simplified
- title, author and others. It is returned in a list of dictionaries when
- calling its extract() method. It is a list because a URL can refer to
- more than one video (think of playlists). The dictionaries must include
+ title, author and others. The information is stored in a dictionary
+ which is then passed to the FileDownloader. The FileDownloader
+ processes this information possibly downloading the video to the file
+ system, among other possible outcomes. The dictionaries must include
the following fields:
id: Video identifier.
"""Sets the downloader for this IE."""
self._downloader = downloader
- def to_stdout(self, message):
- """Print message to stdout if downloader is not in quiet mode."""
- if self._downloader is None or not self._downloader.get_params().get('quiet', False):
- print message
-
- def to_stderr(self, message):
- """Print message to stderr."""
- print >>sys.stderr, message
-
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
"""Information extractor for youtube.com."""
_VALID_URL = r'^((?:http://)?(?:\w+\.)?youtube\.com/(?:(?:v/)|(?:(?:watch(?:\.php)?)?\?(?:.+&)?v=)))?([0-9A-Za-z_-]+)(?(1).+)?$'
- _LOGIN_URL = 'http://www.youtube.com/login?next=/'
- _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/'
+ _LANG_URL = r'http://uk.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
+ _LOGIN_URL = 'http://www.youtube.com/signup?next=/&gl=US&hl=en'
+ _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
_NETRC_MACHINE = 'youtube'
+ _available_formats = ['22', '35', '18', '5', '17', '13', None] # listed in order of priority for -b flag
+ _video_extensions = {
+ '13': '3gp',
+ '17': 'mp4',
+ '18': 'mp4',
+ '22': 'mp4',
+ }
@staticmethod
def suitable(url):
return (re.match(YoutubeIE._VALID_URL, url) is not None)
+ @staticmethod
+ def htmlentity_transform(matchobj):
+ """Transforms an HTML entity to a Unicode character."""
+ entity = matchobj.group(1)
+
+ # Known non-numeric HTML entity
+ if entity in htmlentitydefs.name2codepoint:
+ return unichr(htmlentitydefs.name2codepoint[entity])
+
+ # Unicode character
+ mobj = re.match(ur'(?u)#(x?\d+)', entity)
+ if mobj is not None:
+ numstr = mobj.group(1)
+ if numstr.startswith(u'x'):
+ base = 16
+ numstr = u'0%s' % numstr
+ else:
+ base = 10
+ return unichr(long(numstr, base))
+
+ # Unknown entity in name, return its literal representation
+ return (u'&%s;' % entity)
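+
+ # htmlentity_transform is used as the replacement function of the re.sub()
+ # call in _real_extract below; for instance, u'Ben &amp; Jerry&#39;s'
+ # becomes u"Ben & Jerry's".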
+
+ def report_lang(self):
+ """Report attempt to set language."""
+ self._downloader.to_stdout(u'[youtube] Setting language')
+
def report_login(self):
"""Report attempt to log in."""
- self.to_stdout(u'[youtube] Logging in')
+ self._downloader.to_stdout(u'[youtube] Logging in')
def report_age_confirmation(self):
"""Report attempt to confirm age."""
- self.to_stdout(u'[youtube] Confirming age')
+ self._downloader.to_stdout(u'[youtube] Confirming age')
- def report_webpage_download(self, video_id):
- """Report attempt to download webpage."""
- self.to_stdout(u'[youtube] %s: Downloading video webpage' % video_id)
+ def report_video_info_webpage_download(self, video_id):
+ """Report attempt to download video info webpage."""
+ self._downloader.to_stdout(u'[youtube] %s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
- self.to_stdout(u'[youtube] %s: Extracting video information' % video_id)
+ self._downloader.to_stdout(u'[youtube] %s: Extracting video information' % video_id)
def report_video_url(self, video_id, video_real_url):
"""Report extracted video URL."""
- self.to_stdout(u'[youtube] %s: URL: %s' % (video_id, video_real_url))
-
+ self._downloader.to_stdout(u'[youtube] %s: URL: %s' % (video_id, video_real_url))
+
+ def report_unavailable_format(self, video_id, format):
+ """Report unavailable format."""
+ self._downloader.to_stdout(u'[youtube] %s: Format %s not available' % (video_id, format))
+
def _real_initialize(self):
if self._downloader is None:
return
username = None
password = None
- downloader_params = self._downloader.get_params()
+ downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get('username', None) is not None:
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError), err:
- self.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
+ self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
return
+ # Set language
+ request = urllib2.Request(self._LANG_URL, None, std_headers)
+ try:
+ self.report_lang()
+ urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.to_stderr(u'WARNING: unable to set language: %s' % str(err))
+ return
+
# No authentication to be performed
if username is None:
return
self.report_login()
login_results = urllib2.urlopen(request).read()
if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
- self.to_stderr(u'WARNING: unable to log in: bad username or password')
+ self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
return
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self.to_stderr(u'WARNING: unable to log in: %s' % str(err))
+ self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
return
# Confirm age
self.report_age_confirmation()
age_results = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self.to_stderr(u'ERROR: unable to confirm age: %s' % str(err))
+ self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
return
def _real_extract(self, url):
# Extract video id from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self.to_stderr(u'ERROR: invalid URL: %s' % url)
- return [None]
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
video_id = mobj.group(2)
# Downloader parameters
+ best_quality = False
format_param = None
+ quality_index = 0
if self._downloader is not None:
- params = self._downloader.get_params()
+ params = self._downloader.params
format_param = params.get('format', None)
+ if format_param == '0':
+ format_param = self._available_formats[quality_index]
+ best_quality = True
- # Extension
- video_extension = {'18': 'mp4', '17': '3gp'}.get(format_param, 'flv')
+ while True:
+ # Extension
+ video_extension = self._video_extensions.get(format_param, 'flv')
- # Normalize URL, including format
- normalized_url = 'http://www.youtube.com/watch?v=%s' % video_id
- if format_param is not None:
- normalized_url = '%s&fmt=%s' % (normalized_url, format_param)
- request = urllib2.Request(normalized_url, None, std_headers)
- try:
- self.report_webpage_download(video_id)
- video_webpage = urllib2.urlopen(request).read()
- except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self.to_stderr(u'ERROR: unable to download video webpage: %s' % str(err))
- return [None]
- self.report_information_extraction(video_id)
-
- # "t" param
- mobj = re.search(r', "t": "([^"]+)"', video_webpage)
- if mobj is None:
- self.to_stderr(u'ERROR: unable to extract "t" parameter')
- return [None]
- video_real_url = 'http://www.youtube.com/get_video?video_id=%s&t=%s' % (video_id, mobj.group(1))
- if format_param is not None:
- video_real_url = '%s&fmt=%s' % (video_real_url, format_param)
- self.report_video_url(video_id, video_real_url)
-
- # uploader
- mobj = re.search(r'More From: ([^<]*)<', video_webpage)
- if mobj is None:
- self.to_stderr(u'ERROR: unable to extract uploader nickname')
- return [None]
- video_uploader = mobj.group(1)
+ # Get video info
+ video_info_url = 'http://www.youtube.com/get_video_info?&video_id=%s&el=detailpage&ps=default&eurl=&gl=US&hl=en' % video_id
+ request = urllib2.Request(video_info_url, None, std_headers)
+ try:
+ self.report_video_info_webpage_download(video_id)
+ video_info_webpage = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
+ return
+ self.report_information_extraction(video_id)
+
+ # "t" param
+ mobj = re.search(r'(?m)&token=([^&]+)(?:&|$)', video_info_webpage)
+ if mobj is None:
+ # Attempt to see if YouTube has issued an error message
+ mobj = re.search(r'(?m)&reason=([^&]+)(?:&|$)', video_info_webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract "t" parameter for unknown reason')
+ stream = open('reportme-ydl-%s.dat' % time.time(), 'wb')
+ stream.write(video_info_webpage)
+ stream.close()
+ else:
+ reason = urllib.unquote_plus(mobj.group(1))
+ self._downloader.trouble(u'ERROR: YouTube said: %s' % reason.decode('utf-8'))
+ return
+ token = urllib.unquote(mobj.group(1))
+ video_real_url = 'http://www.youtube.com/get_video?video_id=%s&t=%s&eurl=&el=detailpage&ps=default&gl=US&hl=en' % (video_id, token)
+ if format_param is not None:
+ video_real_url = '%s&fmt=%s' % (video_real_url, format_param)
+ self.report_video_url(video_id, video_real_url)
+
+ # uploader
+ mobj = re.search(r'(?m)&author=([^&]+)(?:&|$)', video_info_webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+ return
+ video_uploader = urllib.unquote(mobj.group(1))
+
+ # title
+ mobj = re.search(r'(?m)&title=([^&]+)(?:&|$)', video_info_webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract video title')
+ return
+ video_title = urllib.unquote(mobj.group(1))
+ video_title = video_title.decode('utf-8')
+ video_title = re.sub(ur'(?u)&(.+?);', self.htmlentity_transform, video_title)
+ video_title = video_title.replace(os.sep, u'%')
+
+ # simplified title
+ simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+ simple_title = simple_title.strip(ur'_')
+
+ try:
+ # Process video information
+ self._downloader.process_info({
+ 'id': video_id.decode('utf-8'),
+ 'url': video_real_url.decode('utf-8'),
+ 'uploader': video_uploader.decode('utf-8'),
+ 'title': video_title,
+ 'stitle': simple_title,
+ 'ext': video_extension.decode('utf-8'),
+ })
+
+ return
+
+ except UnavailableFormatError, err:
+ if best_quality:
+ if quality_index == len(self._available_formats) - 1:
+ # I don't ever expect this to happen
+ self._downloader.trouble(u'ERROR: no known formats available for video')
+ return
+ else:
+ self.report_unavailable_format(video_id, format_param)
+ quality_index += 1
+ format_param = self._available_formats[quality_index]
+ continue
+ else:
+ self._downloader.trouble('ERROR: format not available for video')
+ return
- # title
- mobj = re.search(r'(?im)<title>YouTube - ([^<]*)</title>', video_webpage)
- if mobj is None:
- self.to_stderr(u'ERROR: unable to extract video title')
- return [None]
- video_title = mobj.group(1).decode('utf-8')
- video_title = re.sub(ur'(?u)&(.+?);', lambda x: unichr(htmlentitydefs.name2codepoint[x.group(1)]), video_title)
- video_title = video_title.replace(os.sep, u'%')
-
- # simplified title
- simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
- simple_title = simple_title.strip(ur'_')
-
- # Return information
- return [{
- 'id': video_id.decode('utf-8'),
- 'url': video_real_url.decode('utf-8'),
- 'uploader': video_uploader.decode('utf-8'),
- 'title': video_title,
- 'stitle': simple_title,
- 'ext': video_extension.decode('utf-8'),
- }]
class MetacafeIE(InfoExtractor):
"""Information Extractor for metacafe.com."""
_VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
- _DISCLAIMER = 'http://www.metacafe.com/disclaimer'
+ _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
+ _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
_youtube_ie = None
def __init__(self, youtube_ie, downloader=None):
def report_disclaimer(self):
"""Report disclaimer retrieval."""
- self.to_stdout(u'[metacafe] Retrieving disclaimer')
+ self._downloader.to_stdout(u'[metacafe] Retrieving disclaimer')
def report_age_confirmation(self):
"""Report attempt to confirm age."""
- self.to_stdout(u'[metacafe] Confirming age')
+ self._downloader.to_stdout(u'[metacafe] Confirming age')
def report_download_webpage(self, video_id):
"""Report webpage download."""
- self.to_stdout(u'[metacafe] %s: Downloading webpage' % video_id)
+ self._downloader.to_stdout(u'[metacafe] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
- self.to_stdout(u'[metacafe] %s: Extracting information' % video_id)
+ self._downloader.to_stdout(u'[metacafe] %s: Extracting information' % video_id)
def _real_initialize(self):
# Retrieve disclaimer
self.report_disclaimer()
disclaimer = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self.to_stderr(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
+ self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
return
# Confirm age
disclaimer_form = {
- 'allowAdultContent': '1',
+ 'filters': '0',
'submit': "Continue - I'm over 18",
}
- request = urllib2.Request('http://www.metacafe.com/watch/', urllib.urlencode(disclaimer_form), std_headers)
+ request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form), std_headers)
try:
self.report_age_confirmation()
disclaimer = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self.to_stderr(u'ERROR: unable to confirm age: %s' % str(err))
+ self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
return
def _real_extract(self, url):
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self.to_stderr(u'ERROR: invalid URL: %s' % url)
- return [None]
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
video_id = mobj.group(1)
# Check if video comes from YouTube
mobj2 = re.match(r'^yt-(.*)$', video_id)
if mobj2 is not None:
- return self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
+ self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
+ return
simple_title = mobj.group(2).decode('utf-8')
video_extension = 'flv'
self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self.to_stderr(u'ERROR: unable retrieve video webpage: %s' % str(err))
- return [None]
+ self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
+ return
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
- mobj = re.search(r'(?m)"mediaURL":"(http.*?\.flv)"', webpage)
+ mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
if mobj is None:
- self.to_stderr(u'ERROR: unable to extract media URL')
- return [None]
- mediaURL = mobj.group(1).replace('\\', '')
+ self._downloader.trouble(u'ERROR: unable to extract media URL')
+ return
+ mediaURL = urllib.unquote(mobj.group(1))
- mobj = re.search(r'(?m)"gdaKey":"(.*?)"', webpage)
- if mobj is None:
- self.to_stderr(u'ERROR: unable to extract gdaKey')
- return [None]
- gdaKey = mobj.group(1)
+ #mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
+ #if mobj is None:
+ # self._downloader.trouble(u'ERROR: unable to extract gdaKey')
+ # return
+ #gdaKey = mobj.group(1)
+ #
+ #video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
- video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
+ video_url = mediaURL
- mobj = re.search(r'(?im)<meta name="title" content="Metacafe - ([^"]+)"', webpage)
+ mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
if mobj is None:
- self.to_stderr(u'ERROR: unable to extract title')
- return [None]
+ self._downloader.trouble(u'ERROR: unable to extract title')
+ return
video_title = mobj.group(1).decode('utf-8')
- mobj = re.search(r'(?m)<li id="ChnlUsr">.*?Submitter:<br />(.*?)</li>', webpage)
+ mobj = re.search(r'(?ms)<li id="ChnlUsr">.*?Submitter:.*?<a .*?>(.*?)<', webpage)
if mobj is None:
- self.to_stderr(u'ERROR: unable to extract uploader nickname')
- return [None]
- video_uploader = re.sub(r'<.*?>', '', mobj.group(1))
-
- # Return information
- return [{
- 'id': video_id.decode('utf-8'),
- 'url': video_url.decode('utf-8'),
- 'uploader': video_uploader.decode('utf-8'),
- 'title': video_title,
- 'stitle': simple_title,
- 'ext': video_extension.decode('utf-8'),
- }]
+ self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+ return
+ video_uploader = mobj.group(1)
+
+ try:
+ # Process video information
+ self._downloader.process_info({
+ 'id': video_id.decode('utf-8'),
+ 'url': video_url.decode('utf-8'),
+ 'uploader': video_uploader.decode('utf-8'),
+ 'title': video_title,
+ 'stitle': simple_title,
+ 'ext': video_extension.decode('utf-8'),
+ })
+ except UnavailableFormatError:
+ self._downloader.trouble(u'ERROR: format not available for video')
+
+
+class YoutubeSearchIE(InfoExtractor):
+ """Information Extractor for YouTube search queries."""
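+ # Queries take the form "ytsearch:TERMS", "ytsearchN:TERMS" (first N results)
+ # or "ytsearchall:TERMS".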
+ _VALID_QUERY = r'ytsearch(\d+|all)?:[\s\S]+'
+ _TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
+ _VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
+ _MORE_PAGES_INDICATOR = r'>Next</a>'
+ _youtube_ie = None
+ _max_youtube_results = 1000
+
+ def __init__(self, youtube_ie, downloader=None):
+ InfoExtractor.__init__(self, downloader)
+ self._youtube_ie = youtube_ie
+
+ @staticmethod
+ def suitable(url):
+ return (re.match(YoutubeSearchIE._VALID_QUERY, url) is not None)
+
+ def report_download_page(self, query, pagenum):
+ """Report attempt to download search results page with given number."""
+ self._downloader.to_stdout(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
+
+ def _real_initialize(self):
+ self._youtube_ie.initialize()
+
+ def _real_extract(self, query):
+ mobj = re.match(self._VALID_QUERY, query)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+ return
+
+ prefix, query = query.split(':')
+ prefix = prefix[8:]
+ if prefix == '':
+ self._download_n_results(query, 1)
+ return
+ elif prefix == 'all':
+ self._download_n_results(query, self._max_youtube_results)
+ return
+ else:
+ try:
+ n = long(prefix)
+ if n <= 0:
+ self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+ return
+ elif n > self._max_youtube_results:
+ self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
+ n = self._max_youtube_results
+ self._download_n_results(query, n)
+ return
+ except ValueError: # parsing prefix as integer fails
+ self._download_n_results(query, 1)
+ return
+
+ def _download_n_results(self, query, n):
+ """Downloads a specified number of results for a query"""
+
+ video_ids = []
+ already_seen = set()
+ pagenum = 1
+
+ while True:
+ self.report_download_page(query, pagenum)
+ result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
+ request = urllib2.Request(result_url, None, std_headers)
+ try:
+ page = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+ return
+
+ # Extract video identifiers
+ for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+ video_id = page[mobj.span()[0]:mobj.span()[1]].split('=')[2][:-1]
+ if video_id not in already_seen:
+ video_ids.append(video_id)
+ already_seen.add(video_id)
+ if len(video_ids) == n:
+ # Specified n videos reached
+ for id in video_ids:
+ self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
+ return
+
+ if self._MORE_PAGES_INDICATOR not in page:
+ for id in video_ids:
+ self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
+ return
+
+ pagenum = pagenum + 1
class YoutubePlaylistIE(InfoExtractor):
"""Information Extractor for YouTube playlists."""
- _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/view_play_list\?p=(.+)'
- _TEMPLATE_URL = 'http://www.youtube.com/view_play_list?p=%s&page=%s'
+ _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:view_play_list|my_playlists)\?.*?p=([^&]+).*'
+ _TEMPLATE_URL = 'http://www.youtube.com/view_play_list?p=%s&page=%s&gl=US&hl=en'
_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
- _MORE_PAGES_INDICATOR = r'class="pagerNotCurrent">Next</a>'
+ _MORE_PAGES_INDICATOR = r'/view_play_list?p=%s&page=%s'
_youtube_ie = None
def __init__(self, youtube_ie, downloader=None):
def report_download_page(self, playlist_id, pagenum):
"""Report attempt to download playlist page with given number."""
- self.to_stdout(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
+ self._downloader.to_stdout(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
def _real_initialize(self):
self._youtube_ie.initialize()
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self.to_stderr(u'ERROR: invalid url: %s' % url)
- return [None]
+ self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+ return
# Download playlist pages
playlist_id = mobj.group(1)
try:
page = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self.to_stderr(u'ERROR: unable to download webpage: %s' % str(err))
- return [None]
+ self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+ return
# Extract video identifiers
- ids_in_page = set()
+ ids_in_page = []
for mobj in re.finditer(self._VIDEO_INDICATOR, page):
- ids_in_page.add(mobj.group(1))
- video_ids.extend(list(ids_in_page))
+ if mobj.group(1) not in ids_in_page:
+ ids_in_page.append(mobj.group(1))
+ video_ids.extend(ids_in_page)
- if self._MORE_PAGES_INDICATOR not in page:
+ if (self._MORE_PAGES_INDICATOR % (playlist_id.upper(), pagenum + 1)) not in page:
break
pagenum = pagenum + 1
- information = []
for id in video_ids:
- information.extend(self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id))
- return information
+ self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
+ return
+
+class PostProcessor(object):
+ """Post Processor class.
+ PostProcessor objects can be added to downloaders with their
+ add_post_processor() method. When the downloader has finished a
+ successful download, it will take its internal chain of PostProcessors
+ and start calling the run() method on each one of them, first with
+ an initial argument and then with the returned value of the previous
+ PostProcessor.
+
+ The chain will be stopped if one of them ever returns None or the end
+ of the chain is reached.
+
+ PostProcessor objects follow a "mutual registration" process similar
+ to InfoExtractor objects.
+ """
+
+ _downloader = None
+
+ def __init__(self, downloader=None):
+ self._downloader = downloader
+
+ def set_downloader(self, downloader):
+ """Sets the downloader for this PP."""
+ self._downloader = downloader
+
+ def run(self, information):
+ """Run the PostProcessor.
+
+ The "information" argument is a dictionary like the ones
+ composed by InfoExtractors. The only difference is that this
+ one has an extra field called "filepath" that points to the
+ downloaded file.
+
+ When this method returns None, the postprocessing chain is
+ stopped. However, this method may return an information
+ dictionary that will be passed to the next postprocessing
+ object in the chain. It can be the one it received after
+ changing some fields.
+
+ In addition, this method may raise a PostProcessingError
+ exception that will be taken into account by the downloader
+ it was called from.
+ """
+ return information # by default, do nothing
+
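+# An illustrative sketch of a PostProcessor subclass (the ".out" rename is an
+# assumed example, not something this script does): it receives the information
+# dictionary, renames the downloaded file and returns the updated dictionary so
+# the chain continues.
+#
+#   class RenamingPP(PostProcessor):
+#       def run(self, information):
+#           new_path = information['filepath'] + '.out'
+#           os.rename(information['filepath'], new_path)
+#           information['filepath'] = new_path
+#           return information
+#
+#   fd.add_post_processor(RenamingPP())
+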
+### MAIN PROGRAM ###
if __name__ == '__main__':
try:
# Modules needed only when running the main program
# Parse command line
parser = optparse.OptionParser(
- usage='Usage: %prog [options] url...',
- version='2008.07.22',
- conflict_handler='resolve',
- )
+ usage='Usage: %prog [options] url...',
+ version='INTERNAL',
+ conflict_handler='resolve',
+ )
+
parser.add_option('-h', '--help',
action='help', help='print this help text and exit')
parser.add_option('-v', '--version',
action='version', help='print program version and exit')
- parser.add_option('-u', '--username',
+ parser.add_option('-i', '--ignore-errors',
+ action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
+ parser.add_option('-r', '--rate-limit',
+ dest='ratelimit', metavar='L', help='download rate limit (e.g. 50k or 44.6m)')
+
+ authentication = optparse.OptionGroup(parser, 'Authentication Options')
+ authentication.add_option('-u', '--username',
dest='username', metavar='UN', help='account username')
- parser.add_option('-p', '--password',
+ authentication.add_option('-p', '--password',
dest='password', metavar='PW', help='account password')
- parser.add_option('-o', '--output',
- dest='outtmpl', metavar='TPL', help='output filename template')
- parser.add_option('-q', '--quiet',
+ authentication.add_option('-n', '--netrc',
+ action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
+ parser.add_option_group(authentication)
+
+ video_format = optparse.OptionGroup(parser, 'Video Format Options')
+ video_format.add_option('-f', '--format',
+ action='store', dest='format', metavar='FMT', help='video format code')
+ video_format.add_option('-b', '--best-quality',
+ action='store_const', dest='format', help='download the best quality video possible', const='0')
+ video_format.add_option('-m', '--mobile-version',
+ action='store_const', dest='format', help='alias for -f 17', const='17')
+ video_format.add_option('-d', '--high-def',
+ action='store_const', dest='format', help='alias for -f 22', const='22')
+ parser.add_option_group(video_format)
+
+ verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
+ verbosity.add_option('-q', '--quiet',
action='store_true', dest='quiet', help='activates quiet mode', default=False)
- parser.add_option('-s', '--simulate',
+ verbosity.add_option('-s', '--simulate',
action='store_true', dest='simulate', help='do not download video', default=False)
- parser.add_option('-t', '--title',
- action='store_true', dest='usetitle', help='use title in file name', default=False)
- parser.add_option('-l', '--literal',
- action='store_true', dest='useliteral', help='use literal title in file name', default=False)
- parser.add_option('-n', '--netrc',
- action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
- parser.add_option('-g', '--get-url',
+ verbosity.add_option('-g', '--get-url',
action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
- parser.add_option('-e', '--get-title',
+ verbosity.add_option('-e', '--get-title',
action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
- parser.add_option('-f', '--format',
- dest='format', metavar='FMT', help='video format code')
- parser.add_option('-b', '--best-quality',
- action='store_const', dest='format', help='alias for -f 18', const='18')
- parser.add_option('-m', '--mobile-version',
- action='store_const', dest='format', help='alias for -f 17', const='17')
- parser.add_option('-i', '--ignore-errors',
- action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
- parser.add_option('-r', '--rate-limit',
- dest='ratelimit', metavar='L', help='download rate limit (e.g. 50k or 44.6m)')
+ parser.add_option_group(verbosity)
+
+ filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
+ filesystem.add_option('-t', '--title',
+ action='store_true', dest='usetitle', help='use title in file name', default=False)
+ filesystem.add_option('-l', '--literal',
+ action='store_true', dest='useliteral', help='use literal title in file name', default=False)
+ filesystem.add_option('-o', '--output',
+ dest='outtmpl', metavar='TPL', help='output filename template')
+ filesystem.add_option('-a', '--batch-file',
+ dest='batchfile', metavar='F', help='file containing URLs to download')
+ filesystem.add_option('-w', '--no-overwrites',
+ action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
+ filesystem.add_option('-c', '--continue',
+ action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
+ parser.add_option_group(filesystem)
+
(opts, args) = parser.parse_args()
+ # Batch file verification
+ batchurls = []
+ if opts.batchfile is not None:
+ try:
+ batchurls = open(opts.batchfile, 'r').readlines()
+ batchurls = [x.strip() for x in batchurls]
+ batchurls = [x for x in batchurls if len(x) > 0]
+ except IOError:
+ sys.exit(u'ERROR: batch file could not be read')
+ all_urls = batchurls + args
+
# Conflicting, missing and erroneous options
- if len(args) < 1:
- sys.exit(u'ERROR: you must provide at least one URL')
+ if len(all_urls) < 1:
+ parser.error(u'you must provide at least one URL')
if opts.usenetrc and (opts.username is not None or opts.password is not None):
- sys.exit(u'ERROR: using .netrc conflicts with giving username/password')
+ parser.error(u'using .netrc conflicts with giving username/password')
if opts.password is not None and opts.username is None:
- sys.exit(u'ERROR: account username missing')
+ parser.error(u'account username missing')
if opts.outtmpl is not None and (opts.useliteral or opts.usetitle):
- sys.exit(u'ERROR: using output template conflicts with using title or literal title')
+ parser.error(u'using output template conflicts with using title or literal title')
if opts.usetitle and opts.useliteral:
- sys.exit(u'ERROR: using title conflicts with using literal title')
+ parser.error(u'using title conflicts with using literal title')
if opts.username is not None and opts.password is None:
opts.password = getpass.getpass(u'Type account password and press return:')
if opts.ratelimit is not None:
numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
if numeric_limit is None:
- sys.exit(u'ERROR: invalid rate limit specified')
+ parser.error(u'invalid rate limit specified')
opts.ratelimit = numeric_limit
# Information extractors
youtube_ie = YoutubeIE()
metacafe_ie = MetacafeIE(youtube_ie)
youtube_pl_ie = YoutubePlaylistIE(youtube_ie)
+ youtube_search_ie = YoutubeSearchIE(youtube_ie)
# File downloader
fd = FileDownloader({
'forcetitle': opts.gettitle,
'simulate': (opts.simulate or opts.geturl or opts.gettitle),
'format': opts.format,
- 'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode())
+ 'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(locale.getpreferredencoding()))
or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
or u'%(id)s.%(ext)s'),
'ignoreerrors': opts.ignoreerrors,
'ratelimit': opts.ratelimit,
+ 'nooverwrites': opts.nooverwrites,
+ 'continuedl': opts.continue_dl,
})
+ fd.add_info_extractor(youtube_search_ie)
fd.add_info_extractor(youtube_pl_ie)
fd.add_info_extractor(metacafe_ie)
fd.add_info_extractor(youtube_ie)
- retcode = fd.download(args)
+ retcode = fd.download(all_urls)
sys.exit(retcode)
except DownloadError: