ignoreerrors: Do not stop on download errors.
ratelimit: Download speed limit, in bytes/sec.
nooverwrites: Prevent overwriting files.
+ continuedl: Try to continue downloads if possible.
"""
params = None
new_min = max(bytes / 2.0, 1.0)
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
if elapsed_time < 0.001:
- return int(new_max)
+ return long(new_max)
rate = bytes / elapsed_time
if rate > new_max:
- return int(new_max)
+ return long(new_max)
if rate < new_min:
- return int(new_min)
- return int(rate)
+ return long(new_min)
+ return long(rate)
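# Illustrative sketch (not part of the patch): feeding each measured
# block time back into best_block_size converges the block size on the
# link's real throughput, never more than doubling or halving per read
# and never exceeding 4194304 bytes. The 50 KB/s link below is made up.
def _best_block_size_demo(elapsed_time, bytes):
    # same clamping logic as the method above, module-level for the demo
    new_min = max(bytes / 2.0, 1.0)
    new_max = min(max(bytes * 2.0, 1.0), 4194304)
    if elapsed_time < 0.001:
        return long(new_max)
    rate = bytes / elapsed_time
    if rate > new_max:
        return long(new_max)
    if rate < new_min:
        return long(new_min)
    return long(rate)

block = 1024L
for _ in range(6):
    block = _best_block_size_demo(block / 51200.0, block)
    print block # 2048, 4096, 8192, 16384, 32768, then settles at 51200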
@staticmethod
def parse_bytes(bytestr):
"""Report download progress."""
self.to_stdout(u'\r[download] %s of %s at %s ETA %s' %
(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
+
+ def report_resuming_byte(self, resume_len):
+ """Report attemtp to resume at given byte."""
+ self.to_stdout(u'[download] Resuming download at byte %s' % resume_len)
+
+ def report_file_already_downloaded(self, file_name):
+ """Report file has already been fully downloaded."""
+ self.to_stdout(u'[download] %s has already been downloaded' % file_name)
+
+ def report_unable_to_resume(self):
+ """Report it was impossible to resume download."""
+ self.to_stdout(u'[download] Unable to resume')
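# For illustration, with a 5000-byte partial file already on disk the
# helpers above print lines like ('video.flv' is a made-up name):
#   [download] Resuming download at byte 5000
#   [download] video.flv has already been downloaded
#   [download] Unable to resume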
def report_finish(self):
"""Report download finished."""
return
try:
- outstream = open(filename, 'wb')
+ outstream = open(filename, 'ab') # append mode preserves any partial data for resuming
except (OSError, IOError), err:
self.trouble('ERROR: unable to open for writing: %s' % str(err))
return
break
def _do_download(self, stream, url):
+ basic_request = urllib2.Request(url, None, std_headers)
request = urllib2.Request(url, None, std_headers)
- data = urllib2.urlopen(request)
+
+ # Attempt to resume the transfer if the file already contains data
+ resume_len = stream.tell()
+ if self.params.get('continuedl', False) and resume_len != 0:
+ self.report_resuming_byte(resume_len)
+ request.add_header('Range', 'bytes=%d-' % resume_len)
+ else:
+ # Not resuming: truncate any partial data and start from scratch
+ stream.close()
+ stream = open(stream.name, 'wb')
+ try:
+ data = urllib2.urlopen(request)
+ except urllib2.HTTPError, e:
+ if e.code != 416: # 416 is 'Requested range not satisfiable'
+ raise
+ # The range was unsatisfiable; retry without it to learn the full size
+ data = urllib2.urlopen(basic_request)
+ content_length = data.info().get('Content-Length', None)
+ if content_length is not None and long(content_length) == resume_len:
+ # The file on disk is already complete
+ self.report_file_already_downloaded(stream.name)
+ return
+ else:
+ self.report_unable_to_resume()
+ stream.close()
+ stream = open(stream.name, 'wb')
+
data_len = data.info().get('Content-length', None)
data_len_str = self.format_bytes(data_len)
byte_counter = 0
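# Standalone sketch of the resume handshake used above (illustration
# only; resume_download and the single in-memory read are mine, not the
# patch's). A Range header asks the server to start at the current file
# size; HTTP 416 means that offset is at or past the end of the
# resource, so the Content-Length of an unranged request tells us
# whether the local file is already complete.
import urllib2

def resume_download(filename, url):
    stream = open(filename, 'ab') # append keeps the bytes we already have
    resume_len = stream.tell()
    request = urllib2.Request(url)
    if resume_len != 0:
        request.add_header('Range', 'bytes=%d-' % resume_len)
    try:
        data = urllib2.urlopen(request)
    except urllib2.HTTPError, e:
        if e.code != 416:
            raise
        data = urllib2.urlopen(urllib2.Request(url))
        content_length = data.info().get('Content-Length', None)
        if content_length is not None and long(content_length) == resume_len:
            stream.close()
            return # nothing left to fetch
        stream.close()
        stream = open(filename, 'wb') # server cannot resume; restart
    stream.write(data.read()) # sketch only: real code reads in blocks
    stream.close()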
return
else:
try:
- n = int(prefix)
+ n = long(prefix)
if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
n = self._max_youtube_results
self._download_n_results(query, n)
return
- except ValueError: # parsing prefix as int fails
+ except ValueError: # parsing prefix as integer fails
self._download_n_results(query, 1)
return
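# How the prefix parsing above behaves for typical queries (the
# 'ytsearchN:' form is assumed from this extractor's query syntax;
# the branches for an empty or 'all' prefix are elided above):
#   ytsearch3:cute cats    -> prefix '3',  downloads 3 results
#   ytsearch0:cute cats    -> n <= 0,      reports an invalid number
#   ytsearchmany:cute cats -> ValueError,  falls back to 1 result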
dest='batchfile', metavar='F', help='file containing URLs to download')
filesystem.add_option('-w', '--no-overwrites',
action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
+ filesystem.add_option('-c', '--continue',
+ action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
parser.add_option_group(filesystem)
(opts, args) = parser.parse_args()
'ignoreerrors': opts.ignoreerrors,
'ratelimit': opts.ratelimit,
'nooverwrites': opts.nooverwrites,
+ 'continuedl': opts.continue_dl,
})
fd.add_info_extractor(youtube_search_ie)
fd.add_info_extractor(youtube_pl_ie)
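# With the option wired through, an interrupted download can simply be
# re-run (the URL is a placeholder):
#
#   youtube-dl -c 'http://www.youtube.com/watch?v=abcdefghijk'
#
# which sets params['continuedl'] = True, so the partial file is opened
# in append mode and _do_download issues a ranged request starting at
# stream.tell() instead of starting over.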