-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from __future__ import absolute_import
-
import base64
import binascii
import datetime
import hashlib
import itertools
import json
import os
import re
import socket
import urllib
import xml.etree.ElementTree
from .utils import *
-
-
from .extractor.common import InfoExtractor, SearchInfoExtractor
+
+from .extractor.ard import ARDIE
+from .extractor.arte import ArteTvIE
+from .extractor.bliptv import BlipTVIE, BlipTVUserIE
+from .extractor.comedycentral import ComedyCentralIE
from .extractor.dailymotion import DailymotionIE
+from .extractor.gametrailers import GametrailersIE
+from .extractor.generic import GenericIE
+from .extractor.googleplus import GooglePlusIE
+from .extractor.googlesearch import GoogleSearchIE
from .extractor.metacafe import MetacafeIE
+from .extractor.myvideo import MyVideoIE
+from .extractor.photobucket import PhotobucketIE
from .extractor.statigram import StatigramIE
-from .extractor.youtube import YoutubeIE, YoutubePlaylistIE, YoutubeUserIE, YoutubeChannelIE
+from .extractor.vimeo import VimeoIE
+from .extractor.yahoo import YahooIE, YahooSearchIE
+from .extractor.youtube import YoutubeIE, YoutubePlaylistIE, YoutubeSearchIE, YoutubeUserIE, YoutubeChannelIE
+from .extractor.zdf import ZDFIE
-
-
-
-
-
-class PhotobucketIE(InfoExtractor):
- """Information extractor for photobucket.com."""
-
- # TODO: the original _VALID_URL was:
- # r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
- # Check if it's necessary to keep the old extraction process
- _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*(([\?\&]current=)|_)(?P<id>.*)\.(?P<ext>(flv)|(mp4))'
- IE_NAME = u'photobucket'
-
- def _real_extract(self, url):
- # Extract id from URL
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
-
- video_id = mobj.group('id')
-
- video_extension = mobj.group('ext')
-
- # Retrieve video webpage to extract further information
- webpage = self._download_webpage(url, video_id)
-
- # Extract URL, uploader, and title from webpage
- self.report_extraction(video_id)
- # We first try looking in the javascript code:
- mobj = re.search(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (?P<json>.*?)\);', webpage)
- if mobj is not None:
- info = json.loads(mobj.group('json'))
- return [{
- 'id': video_id,
- 'url': info[u'downloadUrl'],
- 'uploader': info[u'username'],
- 'upload_date': datetime.date.fromtimestamp(info[u'creationDate']).strftime('%Y%m%d'),
- 'title': info[u'title'],
- 'ext': video_extension,
- 'thumbnail': info[u'thumbUrl'],
- }]
-
- # We try looking in other parts of the webpage
- video_url = self._search_regex(r'<link rel="video_src" href=".*\?file=([^"]+)" />',
- webpage, u'video URL')
-
- mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
- if mobj is None:
- raise ExtractorError(u'Unable to extract title')
- video_title = mobj.group(1).decode('utf-8')
- video_uploader = mobj.group(2).decode('utf-8')
-
- return [{
- 'id': video_id.decode('utf-8'),
- 'url': video_url.decode('utf-8'),
- 'uploader': video_uploader,
- 'upload_date': None,
- 'title': video_title,
- 'ext': video_extension.decode('utf-8'),
- }]
-
-
-class YahooIE(InfoExtractor):
- """Information extractor for screen.yahoo.com."""
- _VALID_URL = r'http://screen\.yahoo\.com/.*?-(?P<id>\d*?)\.html'
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
- video_id = mobj.group('id')
- webpage = self._download_webpage(url, video_id)
- m_id = re.search(r'YUI\.namespace\("Media"\)\.CONTENT_ID = "(?P<new_id>.+?)";', webpage)
-
- if m_id is None:
- # TODO: Check which url parameters are required
- info_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;lmsoverride=1;outputformat=mrss;cb=974419660;id=%s;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
- webpage = self._download_webpage(info_url, video_id, u'Downloading info webpage')
- info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
- <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
- <media:pubStart><!\[CDATA\[(?P<date>.*?)\ .*\]\]></media:pubStart>.*
- <media:content\ medium="image"\ url="(?P<thumb>.*?)"\ name="LARGETHUMB"
- '''
- self.report_extraction(video_id)
- m_info = re.search(info_re, webpage, re.VERBOSE|re.DOTALL)
- if m_info is None:
- raise ExtractorError(u'Unable to extract video info')
- video_title = m_info.group('title')
- video_description = m_info.group('description')
- video_thumb = m_info.group('thumb')
- video_date = m_info.group('date')
- video_date = datetime.datetime.strptime(video_date, '%m/%d/%Y').strftime('%Y%m%d')
-
- # TODO: Find a way to get mp4 videos
- rest_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;element=stream;outputformat=mrss;id=%s;lmsoverride=1;bw=375;dynamicstream=1;cb=83521105;tech=flv,mp4;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
- webpage = self._download_webpage(rest_url, video_id, u'Downloading video url webpage')
- m_rest = re.search(r'<media:content url="(?P<url>.*?)" path="(?P<path>.*?)"', webpage)
- if m_rest is None:
- raise ExtractorError(u'Unable to extract video url')
- video_url = m_rest.group('url')
- video_path = m_rest.group('path')
-
- else: # We have to use a different method if another id is defined
- long_id = m_id.group('new_id')
- info_url = 'http://video.query.yahoo.com/v1/public/yql?q=SELECT%20*%20FROM%20yahoo.media.video.streams%20WHERE%20id%3D%22' + long_id + '%22%20AND%20format%3D%22mp4%2Cflv%22%20AND%20protocol%3D%22rtmp%2Chttp%22%20AND%20plrs%3D%2286Gj0vCaSzV_Iuf6hNylf2%22%20AND%20acctid%3D%22389%22%20AND%20plidl%3D%22%22%20AND%20pspid%3D%22792700001%22%20AND%20offnetwork%3D%22false%22%20AND%20site%3D%22ivy%22%20AND%20lang%3D%22en-US%22%20AND%20region%3D%22US%22%20AND%20override%3D%22none%22%3B&env=prod&format=json&callback=YUI.Env.JSONP.yui_3_8_1_1_1368368376830_335'
- webpage = self._download_webpage(info_url, video_id, u'Downloading info json')
- json_str = re.search(r'YUI.Env.JSONP.yui.*?\((.*?)\);', webpage).group(1)
- info = json.loads(json_str)
- res = info[u'query'][u'results'][u'mediaObj'][0]
- stream = res[u'streams'][0]
- video_path = stream[u'path']
- video_url = stream[u'host']
- meta = res[u'meta']
- video_title = meta[u'title']
- video_description = meta[u'description']
- video_thumb = meta[u'thumbnail']
- video_date = None # I can't find it
-
- info_dict = {
- 'id': video_id,
- 'url': video_url,
- 'play_path': video_path,
- 'title':video_title,
- 'description': video_description,
- 'thumbnail': video_thumb,
- 'upload_date': video_date,
- 'ext': 'flv',
- }
- return info_dict
-
-class VimeoIE(InfoExtractor):
- """Information extractor for vimeo.com."""
-
- # _VALID_URL matches Vimeo URLs
- _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo(?P<pro>pro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)'
- IE_NAME = u'vimeo'
-
- def _verify_video_password(self, url, video_id, webpage):
- password = self._downloader.params.get('password', None)
- if password is None:
- raise ExtractorError(u'This video is protected by a password, use the --password option')
- token = re.search(r'xsrft: \'(.*?)\'', webpage).group(1)
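- # the xsrft token has to be sent both in the POST body and as an xsrft cookie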
- data = compat_urllib_parse.urlencode({'password': password,
- 'token': token})
- # I didn't manage to use the password with https
- if url.startswith('https'):
- pass_url = url.replace('https','http')
- else:
- pass_url = url
- password_request = compat_urllib_request.Request(pass_url+'/password', data)
- password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
- password_request.add_header('Cookie', 'xsrft=%s' % token)
- pass_web = self._download_webpage(password_request, video_id,
- u'Verifying the password',
- u'Wrong password')
-
- def _real_extract(self, url, new_video=True):
- # Extract ID from URL
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
-
- video_id = mobj.group('id')
- if not mobj.group('proto'):
- url = 'https://' + url
- if mobj.group('direct_link') or mobj.group('pro'):
- url = 'https://vimeo.com/' + video_id
-
- # Retrieve video webpage to extract further information
- request = compat_urllib_request.Request(url, None, std_headers)
- webpage = self._download_webpage(request, video_id)
-
- # Now we begin extracting as much information as we can from what we
- # retrieved. First we extract the information common to all extractors,
- # and later we extract those that are Vimeo-specific.
- self.report_extraction(video_id)
-
- # Extract the config JSON
- try:
- config = webpage.split(' = {config:')[1].split(',assets:')[0]
- config = json.loads(config)
- except:
- if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
- raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')
-
- if re.search('If so please provide the correct password.', webpage):
- self._verify_video_password(url, video_id, webpage)
- return self._real_extract(url)
- else:
- raise ExtractorError(u'Unable to extract info section')
-
- # Extract title
- video_title = config["video"]["title"]
-
- # Extract uploader and uploader_id
- video_uploader = config["video"]["owner"]["name"]
- video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None
-
- # Extract video thumbnail
- video_thumbnail = config["video"]["thumbnail"]
-
- # Extract video description
- video_description = get_element_by_attribute("itemprop", "description", webpage)
- if video_description: video_description = clean_html(video_description)
- else: video_description = u''
-
- # Extract upload date
- video_upload_date = None
- mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
- if mobj is not None:
- video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
-
- # Vimeo specific: extract request signature and timestamp
- sig = config['request']['signature']
- timestamp = config['request']['timestamp']
-
- # Vimeo specific: extract video codec and quality information
- # First consider quality, then codecs, then take everything
- # TODO bind to format param
- codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
- files = { 'hd': [], 'sd': [], 'other': []}
- for codec_name, codec_extension in codecs:
- if codec_name in config["video"]["files"]:
- if 'hd' in config["video"]["files"][codec_name]:
- files['hd'].append((codec_name, codec_extension, 'hd'))
- elif 'sd' in config["video"]["files"][codec_name]:
- files['sd'].append((codec_name, codec_extension, 'sd'))
- else:
- files['other'].append((codec_name, codec_extension, config["video"]["files"][codec_name][0]))
-
- for quality in ('hd', 'sd', 'other'):
- if len(files[quality]) > 0:
- video_quality = files[quality][0][2]
- video_codec = files[quality][0][0]
- video_extension = files[quality][0][1]
- self.to_screen(u'%s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
- break
- else:
- raise ExtractorError(u'No known codec found')
-
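- # build the final download URL from the request signature and timestamp plus the selected quality and codec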
- video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
- %(video_id, sig, timestamp, video_quality, video_codec.upper())
-
- return [{
- 'id': video_id,
- 'url': video_url,
- 'uploader': video_uploader,
- 'uploader_id': video_uploader_id,
- 'upload_date': video_upload_date,
- 'title': video_title,
- 'ext': video_extension,
- 'thumbnail': video_thumbnail,
- 'description': video_description,
- }]
-
-
-class ArteTvIE(InfoExtractor):
- """arte.tv information extractor."""
-
- _VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*'
- _LIVE_URL = r'index-[0-9]+\.html$'
-
- IE_NAME = u'arte.tv'
-
- def fetch_webpage(self, url):
- request = compat_urllib_request.Request(url)
- try:
- self.report_download_webpage(url)
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- raise ExtractorError(u'Unable to retrieve video webpage: %s' % compat_str(err))
- except ValueError as err:
- raise ExtractorError(u'Invalid URL: %s' % url)
- return webpage
-
- def grep_webpage(self, url, regex, regexFlags, matchTuples):
- page = self.fetch_webpage(url)
- mobj = re.search(regex, page, regexFlags)
- info = {}
-
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
-
- for (i, key, err) in matchTuples:
- if mobj.group(i) is None:
- raise ExtractorError(err)
- else:
- info[key] = mobj.group(i)
-
- return info
-
- def extractLiveStream(self, url):
- video_lang = url.split('/')[-4]
- info = self.grep_webpage(
- url,
- r'src="(.*?/videothek_js.*?\.js)',
- 0,
- [
- (1, 'url', u'Invalid URL: %s' % url)
- ]
- )
- http_host = url.split('/')[2]
- next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
- info = self.grep_webpage(
- next_url,
- r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
- '(http://.*?\.swf).*?' +
- '(rtmp://.*?)\'',
- re.DOTALL,
- [
- (1, 'path', u'could not extract video path: %s' % url),
- (2, 'player', u'could not extract video player: %s' % url),
- (3, 'url', u'could not extract video url: %s' % url)
- ]
- )
- video_url = u'%s/%s' % (info.get('url'), info.get('path'))
-
- def extractPlus7Stream(self, url):
- video_lang = url.split('/')[-3]
- info = self.grep_webpage(
- url,
- r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
- 0,
- [
- (1, 'url', u'Invalid URL: %s' % url)
- ]
- )
- next_url = compat_urllib_parse.unquote(info.get('url'))
- info = self.grep_webpage(
- next_url,
- r'<video lang="%s" ref="(http[^\'"&]*)' % video_lang,
- 0,
- [
- (1, 'url', u'Could not find <video> tag: %s' % url)
- ]
- )
- next_url = compat_urllib_parse.unquote(info.get('url'))
-
- info = self.grep_webpage(
- next_url,
- r'<video id="(.*?)".*?>.*?' +
- '<name>(.*?)</name>.*?' +
- '<dateVideo>(.*?)</dateVideo>.*?' +
- '<url quality="hd">(.*?)</url>',
- re.DOTALL,
- [
- (1, 'id', u'could not extract video id: %s' % url),
- (2, 'title', u'could not extract video title: %s' % url),
- (3, 'date', u'could not extract video date: %s' % url),
- (4, 'url', u'could not extract video url: %s' % url)
- ]
- )
-
- return {
- 'id': info.get('id'),
- 'url': compat_urllib_parse.unquote(info.get('url')),
- 'uploader': u'arte.tv',
- 'upload_date': unified_strdate(info.get('date')),
- 'title': info.get('title').decode('utf-8'),
- 'ext': u'mp4',
- 'format': u'NA',
- 'player_url': None,
- }
-
- def _real_extract(self, url):
- video_id = url.split('/')[-1]
- self.report_extraction(video_id)
-
- if re.search(self._LIVE_URL, video_id) is not None:
- self.extractLiveStream(url)
- return
- else:
- info = self.extractPlus7Stream(url)
-
- return [info]
-
-
-class GenericIE(InfoExtractor):
- """Generic last-resort information extractor."""
-
- _VALID_URL = r'.*'
- IE_NAME = u'generic'
-
- def report_download_webpage(self, video_id):
- """Report webpage download."""
- if not self._downloader.params.get('test', False):
- self._downloader.report_warning(u'Falling back on generic information extractor.')
- super(GenericIE, self).report_download_webpage(video_id)
-
- def report_following_redirect(self, new_url):
- """Report information extraction."""
- self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
-
- def _test_redirect(self, url):
- """Check if it is a redirect, like url shorteners, in case return the new url."""
- class HeadRequest(compat_urllib_request.Request):
- def get_method(self):
- return "HEAD"
-
- class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
- """
- Subclass the HTTPRedirectHandler to make it use our
- HeadRequest also on the redirected URL
- """
- def redirect_request(self, req, fp, code, msg, headers, newurl):
- if code in (301, 302, 303, 307):
- newurl = newurl.replace(' ', '%20')
- newheaders = dict((k,v) for k,v in req.headers.items()
- if k.lower() not in ("content-length", "content-type"))
- return HeadRequest(newurl,
- headers=newheaders,
- origin_req_host=req.get_origin_req_host(),
- unverifiable=True)
- else:
- raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)
-
- class HTTPMethodFallback(compat_urllib_request.BaseHandler):
- """
- Fallback to GET if HEAD is not allowed (405 HTTP error)
- """
- def http_error_405(self, req, fp, code, msg, headers):
- fp.read()
- fp.close()
-
- newheaders = dict((k,v) for k,v in req.headers.items()
- if k.lower() not in ("content-length", "content-type"))
- return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
- headers=newheaders,
- origin_req_host=req.get_origin_req_host(),
- unverifiable=True))
-
- # Build our opener
- opener = compat_urllib_request.OpenerDirector()
- for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
- HTTPMethodFallback, HEADRedirectHandler,
- compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
- opener.add_handler(handler())
-
- response = opener.open(HeadRequest(url))
- if response is None:
- raise ExtractorError(u'Invalid URL protocol')
- new_url = response.geturl()
-
- if url == new_url:
- return False
-
- self.report_following_redirect(new_url)
- return new_url
-
- def _real_extract(self, url):
- new_url = self._test_redirect(url)
- if new_url: return [self.url_result(new_url)]
-
- video_id = url.split('/')[-1]
- try:
- webpage = self._download_webpage(url, video_id)
- except ValueError as err:
- # since this is the last-resort InfoExtractor, if
- # this error is thrown, it'll be thrown here
- raise ExtractorError(u'Invalid URL: %s' % url)
-
- self.report_extraction(video_id)
- # Start with something easy: JW Player in SWFObject
- mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
- if mobj is None:
- # Broaden the search a little bit
- mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
- if mobj is None:
- # Broaden the search a little bit: JWPlayer JS loader
- mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
- if mobj is None:
- # Try to find twitter cards info
- mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
- if mobj is None:
- # We look for Open Graph info:
- # We have to match any number of spaces between elements, as some sites try to align them (e.g. statigr.am)
- m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
- # We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
- if m_video_type is not None:
- mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
-
- # It's possible that one of the regexes
- # matched, but returned an empty group:
- if mobj.group(1) is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
-
- video_url = compat_urllib_parse.unquote(mobj.group(1))
- video_id = os.path.basename(video_url)
-
- # here's a fun little line of code for you:
- video_extension = os.path.splitext(video_id)[1][1:]
- video_id = os.path.splitext(video_id)[0]
-
- # it's tempting to parse this further, but you would
- # have to take into account all the variations like
- # Video Title - Site Name
- # Site Name | Video Title
- # Video Title - Tagline | Site Name
- # and so on and so forth; it's just not practical
- video_title = self._html_search_regex(r'<title>(.*)</title>',
- webpage, u'video title')
-
- # video uploader is domain name
- video_uploader = self._search_regex(r'(?:https?://)?([^/]*)/.*',
- url, u'video uploader')
-
- return [{
- 'id': video_id,
- 'url': video_url,
- 'uploader': video_uploader,
- 'upload_date': None,
- 'title': video_title,
- 'ext': video_extension,
- }]
-
-
-class YoutubeSearchIE(SearchInfoExtractor):
- """Information Extractor for YouTube search queries."""
- _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
- _MAX_RESULTS = 1000
- IE_NAME = u'youtube:search'
- _SEARCH_KEY = 'ytsearch'
-
- def report_download_page(self, query, pagenum):
- """Report attempt to download search page with given number."""
- self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
-
- def _get_n_results(self, query, n):
- """Get a specified number of results for a query"""
-
- video_ids = []
- pagenum = 0
- limit = n
-
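- # the gdata API returns at most 50 results per request (max-results=50), so page through via start-index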
- while (50 * pagenum) < limit:
- self.report_download_page(query, pagenum+1)
- result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50*pagenum)+1)
- request = compat_urllib_request.Request(result_url)
- try:
- data = compat_urllib_request.urlopen(request).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- raise ExtractorError(u'Unable to download API page: %s' % compat_str(err))
- api_response = json.loads(data)['data']
-
- if not 'items' in api_response:
- raise ExtractorError(u'[youtube] No video results')
-
- new_ids = list(video['id'] for video in api_response['items'])
- video_ids += new_ids
-
- limit = min(n, api_response['totalItems'])
- pagenum += 1
-
- if len(video_ids) > n:
- video_ids = video_ids[:n]
- videos = [self.url_result('http://www.youtube.com/watch?v=%s' % id, 'Youtube') for id in video_ids]
- return self.playlist_result(videos, query)
-
-
-class GoogleSearchIE(SearchInfoExtractor):
- """Information Extractor for Google Video search queries."""
- _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
- _MAX_RESULTS = 1000
- IE_NAME = u'video.google:search'
- _SEARCH_KEY = 'gvsearch'
-
- def _get_n_results(self, query, n):
- """Get a specified number of results for a query"""
-
- res = {
- '_type': 'playlist',
- 'id': query,
- 'entries': []
- }
-
- for pagenum in itertools.count(1):
- result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
- webpage = self._download_webpage(result_url, u'gvsearch:' + query,
- note='Downloading result page ' + str(pagenum))
-
- for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
- e = {
- '_type': 'url',
- 'url': mobj.group(1)
- }
- res['entries'].append(e)
-
- if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
- return res
-
-class YahooSearchIE(SearchInfoExtractor):
- """Information Extractor for Yahoo! Video search queries."""
-
- _MAX_RESULTS = 1000
- IE_NAME = u'screen.yahoo:search'
- _SEARCH_KEY = 'yvsearch'
-
- def _get_n_results(self, query, n):
- """Get a specified number of results for a query"""
-
- res = {
- '_type': 'playlist',
- 'id': query,
- 'entries': []
- }
- for pagenum in itertools.count(0):
- result_url = u'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
- webpage = self._download_webpage(result_url, query,
- note='Downloading results page '+str(pagenum+1))
- info = json.loads(webpage)
- m = info[u'm']
- results = info[u'results']
-
- for (i, r) in enumerate(results):
- if (pagenum * 30) + i >= n:
- break
- mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
- e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
- res['entries'].append(e)
- if (pagenum * 30 + i >= n) or (m[u'last'] >= (m[u'total'] - 1)):
- break
- return res
-class BlipTVUserIE(InfoExtractor):
- """Information Extractor for blip.tv users."""
- _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
- _PAGE_SIZE = 12
- IE_NAME = u'blip.tv:user'
- def _real_extract(self, url):
- # Extract username
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
- username = mobj.group(1)
- page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
- page = self._download_webpage(url, username, u'Downloading user page')
- mobj = re.search(r'data-users-id="([^"]+)"', page)
- page_base = page_base % mobj.group(1)
- # Download video ids using BlipTV Ajax calls. Result size per
- # query is limited (currently to 12 videos) so we need to query
- # page by page until there are no video ids - it means we got
- # all of them.
- video_ids = []
- pagenum = 1
- while True:
- url = page_base + "&page=" + str(pagenum)
- page = self._download_webpage(url, username,
- u'Downloading video ids from page %d' % pagenum)
- # Extract video identifiers
- ids_in_page = []
- for mobj in re.finditer(r'href="/([^"]+)"', page):
- if mobj.group(1) not in ids_in_page:
- ids_in_page.append(unescapeHTML(mobj.group(1)))
- video_ids.extend(ids_in_page)
- # A little optimization - if current page is not
- # "full", ie. does not contain PAGE_SIZE video ids then
- # we can assume that this page is the last one - there
- # are no more ids on further pages - no need to query
- # again.
- if len(ids_in_page) < self._PAGE_SIZE:
- break
-
- pagenum += 1
-
- urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
- url_entries = [self.url_result(url, 'BlipTV') for url in urls]
- return [self.playlist_result(url_entries, playlist_title = username)]
class DepositFilesIE(InfoExtractor):
return [info]
-class BlipTVIE(InfoExtractor):
- """Information extractor for blip.tv"""
-
- _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
- _URL_EXT = r'^.*\.([a-z0-9]+)$'
- IE_NAME = u'blip.tv'
-
- def report_direct_download(self, title):
- """Report information extraction."""
- self.to_screen(u'%s: Direct download detected' % title)
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
-
- # See https://github.com/rg3/youtube-dl/issues/857
- api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url)
- if api_mobj is not None:
- url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id')
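- # /play/ URLs redirect to a player page whose URL fragment carries the real file URL;
- # grab the file id from that fragment and restart extraction with the canonical
- # http://blip.tv/a/a-<id> form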
- urlp = compat_urllib_parse_urlparse(url)
- if urlp.path.startswith('/play/'):
- request = compat_urllib_request.Request(url)
- response = compat_urllib_request.urlopen(request)
- redirecturl = response.geturl()
- rurlp = compat_urllib_parse_urlparse(redirecturl)
- file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
- url = 'http://blip.tv/a/a-' + file_id
- return self._real_extract(url)
-
-
- if '?' in url:
- cchar = '&'
- else:
- cchar = '?'
- json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
- request = compat_urllib_request.Request(json_url)
- request.add_header('User-Agent', 'iTunes/10.6.1')
- self.report_extraction(mobj.group(1))
- info = None
- try:
- urlh = compat_urllib_request.urlopen(request)
- if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
- basename = url.split('/')[-1]
- title,ext = os.path.splitext(basename)
- title = title.decode('UTF-8')
- ext = ext.replace('.', '')
- self.report_direct_download(title)
- info = {
- 'id': title,
- 'url': url,
- 'uploader': None,
- 'upload_date': None,
- 'title': title,
- 'ext': ext,
- 'urlhandle': urlh
- }
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- raise ExtractorError(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
- if info is None: # Regular URL
- try:
- json_code_bytes = urlh.read()
- json_code = json_code_bytes.decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))
-
- try:
- json_data = json.loads(json_code)
- if 'Post' in json_data:
- data = json_data['Post']
- else:
- data = json_data
-
- upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
- video_url = data['media']['url']
- umobj = re.match(self._URL_EXT, video_url)
- if umobj is None:
- raise ValueError('Can not determine filename extension')
- ext = umobj.group(1)
-
- info = {
- 'id': data['item_id'],
- 'url': video_url,
- 'uploader': data['display_name'],
- 'upload_date': upload_date,
- 'title': data['title'],
- 'ext': ext,
- 'format': data['media']['mimeType'],
- 'thumbnail': data['thumbnailUrl'],
- 'description': data['description'],
- 'player_url': data['embedUrl'],
- 'user_agent': 'iTunes/10.6.1',
- }
- except (ValueError,KeyError) as err:
- raise ExtractorError(u'Unable to parse video information: %s' % repr(err))
-
- return [info]
-
-
-class MyVideoIE(InfoExtractor):
- """Information Extractor for myvideo.de."""
-
- _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*'
- IE_NAME = u'myvideo'
-
- # Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
- # Released into the Public Domain by Tristan Fischer on 2013-05-19
- # https://github.com/rg3/youtube-dl/pull/842
- def __rc4crypt(self,data, key):
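- # plain RC4: the first loop is the key-scheduling algorithm (KSA), the second
- # is the PRGA loop that XORs the generated keystream with the data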
- x = 0
- box = list(range(256))
- for i in list(range(256)):
- x = (x + box[i] + compat_ord(key[i % len(key)])) % 256
- box[i], box[x] = box[x], box[i]
- x = 0
- y = 0
- out = ''
- for char in data:
- x = (x + 1) % 256
- y = (y + box[x]) % 256
- box[x], box[y] = box[y], box[x]
- out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
- return out
-
- def __md5(self,s):
- return hashlib.md5(s).hexdigest().encode()
-
- def _real_extract(self,url):
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'invalid URL: %s' % url)
-
- video_id = mobj.group(1)
-
- GK = (
- b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
- b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
- b'TnpsbA0KTVRkbU1tSTRNdz09'
- )
-
- # Get video webpage
- webpage_url = 'http://www.myvideo.de/watch/%s' % video_id
- webpage = self._download_webpage(webpage_url, video_id)
-
- mobj = re.search('source src=\'(.+?)[.]([^.]+)\'', webpage)
- if mobj is not None:
- self.report_extraction(video_id)
- video_url = mobj.group(1) + '.flv'
-
- video_title = self._html_search_regex('<title>([^<]+)</title>',
- webpage, u'title')
-
- video_ext = self._search_regex('[.](.+?)$', video_url, u'extension')
-
- return [{
- 'id': video_id,
- 'url': video_url,
- 'uploader': None,
- 'upload_date': None,
- 'title': video_title,
- 'ext': u'flv',
- }]
-
- # try encxml
- mobj = re.search('var flashvars={(.+?)}', webpage)
- if mobj is None:
- raise ExtractorError(u'Unable to extract video')
-
- params = {}
- encxml = ''
- sec = mobj.group(1)
- for (a, b) in re.findall('(.+?):\'(.+?)\',?', sec):
- if not a == '_encxml':
- params[a] = b
- else:
- encxml = compat_urllib_parse.unquote(b)
- if not params.get('domain'):
- params['domain'] = 'www.myvideo.de'
- xmldata_url = '%s?%s' % (encxml, compat_urllib_parse.urlencode(params))
- if 'flash_playertype=MTV' in xmldata_url:
- self._downloader.report_warning(u'avoiding MTV player')
- xmldata_url = (
- 'http://www.myvideo.de/dynamic/get_player_video_xml.php'
- '?flash_playertype=D&ID=%s&_countlimit=4&autorun=yes'
- ) % video_id
-
- # get enc data
- enc_data = self._download_webpage(xmldata_url, video_id).split('=')[1]
- enc_data_b = binascii.unhexlify(enc_data)
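- # the RC4 key is md5(base64-decoded-twice GK constant + md5(video id))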
- sk = self.__md5(
- base64.b64decode(base64.b64decode(GK)) +
- self.__md5(
- str(video_id).encode('utf-8')
- )
- )
- dec_data = self.__rc4crypt(enc_data_b, sk)
-
- # extracting infos
- self.report_extraction(video_id)
- video_url = None
- mobj = re.search('connectionurl=\'(.*?)\'', dec_data)
- if mobj:
- video_url = compat_urllib_parse.unquote(mobj.group(1))
- if 'myvideo2flash' in video_url:
- self._downloader.report_warning(u'forcing RTMPT ...')
- video_url = video_url.replace('rtmpe://', 'rtmpt://')
- if not video_url:
- # extract non rtmp videos
- mobj = re.search('path=\'(http.*?)\' source=\'(.*?)\'', dec_data)
- if mobj is None:
- raise ExtractorError(u'unable to extract url')
- video_url = compat_urllib_parse.unquote(mobj.group(1)) + compat_urllib_parse.unquote(mobj.group(2))
-
- video_file = self._search_regex('source=\'(.*?)\'', dec_data, u'video file')
- video_file = compat_urllib_parse.unquote(video_file)
-
- if not video_file.endswith('f4m'):
- ppath, prefix = video_file.split('.')
- video_playpath = '%s:%s' % (prefix, ppath)
- video_hls_playlist = ''
- else:
- video_playpath = ''
- # video_filepath is never defined above; presumably it should hold the stream's
- # base path, which dec_data exposes through its path='...' attribute (see the
- # non-RTMP branch); derive it from there
- video_filepath = self._search_regex('path=\'(.*?)\'', dec_data, u'video path')
- video_hls_playlist = (
- video_filepath + video_file
- ).replace('.f4m', '.m3u8')
- video_swfobj = self._search_regex('swfobject.embedSWF\(\'(.+?)\'', webpage, u'swfobj')
- video_swfobj = compat_urllib_parse.unquote(video_swfobj)
-
- video_title = self._html_search_regex("<h1(?: class='globalHd')?>(.*?)</h1>",
- webpage, u'title')
-
- return [{
- 'id': video_id,
- 'url': video_url,
- 'tc_url': video_url,
- 'uploader': None,
- 'upload_date': None,
- 'title': video_title,
- 'ext': u'flv',
- 'play_path': video_playpath,
- 'video_file': video_file,
- 'video_hls_playlist': video_hls_playlist,
- 'player_url': video_swfobj,
- }]
-
-
-class ComedyCentralIE(InfoExtractor):
- """Information extractor for The Daily Show and Colbert Report """
-
- # urls can be abbreviations like :thedailyshow or :colbert
- # urls for episodes like:
- # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
- # or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
- # or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
- _VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
- |(https?://)?(www\.)?
- (?P<showname>thedailyshow|colbertnation)\.com/
- (full-episodes/(?P<episode>.*)|
- (?P<clip>
- (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
- |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))))
- $"""
-
- _available_formats = ['3500', '2200', '1700', '1200', '750', '400']
-
- _video_extensions = {
- '3500': 'mp4',
- '2200': 'mp4',
- '1700': 'mp4',
- '1200': 'mp4',
- '750': 'mp4',
- '400': 'mp4',
- }
- _video_dimensions = {
- '3500': '1280x720',
- '2200': '960x540',
- '1700': '768x432',
- '1200': '640x360',
- '750': '512x288',
- '400': '384x216',
- }
-
- @classmethod
- def suitable(cls, url):
- """Receives a URL and returns True if suitable for this IE."""
- return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
-
- def _print_formats(self, formats):
- print('Available formats:')
- for x in formats:
- print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'mp4'), self._video_dimensions.get(x, '???')))
-
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url, re.VERBOSE)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
-
- if mobj.group('shortname'):
- if mobj.group('shortname') in ('tds', 'thedailyshow'):
- url = u'http://www.thedailyshow.com/full-episodes/'
- else:
- url = u'http://www.colbertnation.com/full-episodes/'
- mobj = re.match(self._VALID_URL, url, re.VERBOSE)
- assert mobj is not None
-
- if mobj.group('clip'):
- if mobj.group('showname') == 'thedailyshow':
- epTitle = mobj.group('tdstitle')
- else:
- epTitle = mobj.group('cntitle')
- dlNewest = False
- else:
- dlNewest = not mobj.group('episode')
- if dlNewest:
- epTitle = mobj.group('showname')
- else:
- epTitle = mobj.group('episode')
-
- self.report_extraction(epTitle)
- webpage,htmlHandle = self._download_webpage_handle(url, epTitle)
- if dlNewest:
- url = htmlHandle.geturl()
- mobj = re.match(self._VALID_URL, url, re.VERBOSE)
- if mobj is None:
- raise ExtractorError(u'Invalid redirected URL: ' + url)
- if mobj.group('episode') == '':
- raise ExtractorError(u'Redirected URL is still not specific: ' + url)
- epTitle = mobj.group('episode')
-
- mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
-
- if len(mMovieParams) == 0:
- # The Colbert Report embeds the information in a data-mgid attribute
- # without a URL prefix; so extract the alternate reference
- # and then add the URL prefix manually.
-
- altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
- if len(altMovieParams) == 0:
- raise ExtractorError(u'unable to find Flash URL in webpage ' + url)
- else:
- mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
-
- uri = mMovieParams[0][1]
- indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
- indexXml = self._download_webpage(indexUrl, epTitle,
- u'Downloading show index',
- u'unable to download episode index')
-
- results = []
-
- idoc = xml.etree.ElementTree.fromstring(indexXml)
- itemEls = idoc.findall('.//item')
- for partNum,itemEl in enumerate(itemEls):
- mediaId = itemEl.findall('./guid')[0].text
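- # the guid is an mgid-style URN: its last component is the media id and the
- # component before it ("<show>.com") names the show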
- shortMediaId = mediaId.split(':')[-1]
- showId = mediaId.split(':')[-2].replace('.com', '')
- officialTitle = itemEl.findall('./title')[0].text
- officialDate = unified_strdate(itemEl.findall('./pubDate')[0].text)
-
- configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
- compat_urllib_parse.urlencode({'uri': mediaId}))
- configXml = self._download_webpage(configUrl, epTitle,
- u'Downloading configuration for %s' % shortMediaId)
-
- cdoc = xml.etree.ElementTree.fromstring(configXml)
- turls = []
- for rendition in cdoc.findall('.//rendition'):
- finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
- turls.append(finfo)
-
- if len(turls) == 0:
- self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found')
- continue
-
- if self._downloader.params.get('listformats', None):
- self._print_formats([i[0] for i in turls])
- return
-
- # For now, just pick the highest bitrate
- format,rtmp_video_url = turls[-1]
-
- # Get the format arg from the arg stream
- req_format = self._downloader.params.get('format', None)
-
- # Select format if we can find one
- for f,v in turls:
- if f == req_format:
- format, rtmp_video_url = f, v
- break
-
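- # the feed only exposes rtmp(e) URLs; re-host the gsp.comedystor path on the
- # mtvnmobile HTTP base so the file can be fetched over plain HTTP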
- m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp.comedystor/.*)$', rtmp_video_url)
- if not m:
- raise ExtractorError(u'Cannot transform RTMP url')
- base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
- video_url = base + m.group('finalid')
-
- effTitle = showId + u'-' + epTitle + u' part ' + compat_str(partNum+1)
- info = {
- 'id': shortMediaId,
- 'url': video_url,
- 'uploader': showId,
- 'upload_date': officialDate,
- 'title': effTitle,
- 'ext': 'mp4',
- 'format': format,
- 'thumbnail': None,
- 'description': officialTitle,
- }
- results.append(info)
-
- return results
class EscapistIE(InfoExtractor):
}]
-class GooglePlusIE(InfoExtractor):
- """Information extractor for plus.google.com."""
-
- _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
- IE_NAME = u'plus.google'
-
- def _real_extract(self, url):
- # Extract id from URL
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
-
- post_url = mobj.group(0)
- video_id = mobj.group(1)
-
- video_extension = 'flv'
-
- # Step 1, Retrieve post webpage to extract further information
- webpage = self._download_webpage(post_url, video_id, u'Downloading entry webpage')
-
- self.report_extraction(video_id)
-
- # Extract update date
- upload_date = self._html_search_regex('title="Timestamp">(.*?)</a>',
- webpage, u'upload date', fatal=False)
- if upload_date:
- # Convert timestring to a format suitable for filename
- upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
- upload_date = upload_date.strftime('%Y%m%d')
-
- # Extract uploader
- uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
- webpage, u'uploader', fatal=False)
-
- # Extract title
- # Get the first line for title
- video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
- webpage, 'title', default=u'NA')
-
- # Step 2, Stimulate clicking the image box to launch video
- video_page = self._search_regex('"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]',
- webpage, u'video page URL')
- webpage = self._download_webpage(video_page, video_id, u'Downloading video page')
-
- # Extract video links of all sizes from the video page
- pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
- mobj = re.findall(pattern, webpage)
- if len(mobj) == 0:
- raise ExtractorError(u'Unable to extract video links')
-
- # Sort by resolution
- links = sorted(mobj)
-
- # Take the last entry of the sorted list, i.e. the highest resolution
- video_url = links[-1]
- # Only get the url. The resolution part in the tuple has no use anymore
- video_url = video_url[-1]
- # Treat escaped \u0026 style hex
- try:
- video_url = video_url.decode("unicode_escape")
- except AttributeError: # Python 3
- video_url = bytes(video_url, 'ascii').decode('unicode-escape')
-
-
- return [{
- 'id': video_id,
- 'url': video_url,
- 'uploader': uploader,
- 'upload_date': upload_date,
- 'title': video_title,
- 'ext': video_extension,
- }]
class NBAIE(InfoExtractor):
_VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*?)(?:/index\.html)?(?:\?.*)?$'
return [info]
-class ARDIE(InfoExtractor):
- _VALID_URL = r'^(?:https?://)?(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
- _TITLE = r'<h1(?: class="boxTopHeadline")?>(?P<title>.*)</h1>'
- _MEDIA_STREAM = r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)'
-
- def _real_extract(self, url):
- # determine video id from url
- m = re.match(self._VALID_URL, url)
-
- numid = re.search(r'documentId=([0-9]+)', url)
- if numid:
- video_id = numid.group(1)
- else:
- video_id = m.group('video_id')
-
- # determine title and media streams from webpage
- html = self._download_webpage(url, video_id)
- title = re.search(self._TITLE, html).group('title')
- streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
- if not streams:
- assert '"fsk"' in html
- raise ExtractorError(u'This video is only available after 8:00 pm')
-
- # choose default media type and highest quality for now
- stream = max([s for s in streams if int(s["media_type"]) == 0],
- key=lambda s: int(s["quality"]))
-
- # there's two possibilities: RTMP stream or HTTP download
- info = {'id': video_id, 'title': title, 'ext': 'mp4'}
- if stream['rtmp_url']:
- self.to_screen(u'RTMP download detected')
- assert stream['video_url'].startswith('mp4:')
- info["url"] = stream["rtmp_url"]
- info["play_path"] = stream['video_url']
- else:
- assert stream["video_url"].endswith('.mp4')
- info["url"] = stream["video_url"]
- return [info]
-
-class ZDFIE(InfoExtractor):
- _VALID_URL = r'^http://www\.zdf\.de\/ZDFmediathek\/(.*beitrag\/video\/)(?P<video_id>[^/\?]+)(?:\?.*)?'
- _TITLE = r'<h1(?: class="beitragHeadline")?>(?P<title>.*)</h1>'
- _MEDIA_STREAM = r'<a href="(?P<video_url>.+(?P<media_type>.streaming).+/zdf/(?P<quality>[^\/]+)/[^"]*)".+class="play".+>'
- _MMS_STREAM = r'href="(?P<video_url>mms://[^"]*)"'
- _RTSP_STREAM = r'(?P<video_url>rtsp://[^"]*.mp4)'
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
- video_id = mobj.group('video_id')
-
- html = self._download_webpage(url, video_id)
- streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
- if not streams:
- raise ExtractorError(u'No media url found.')
-
- # s['media_type'] == 'wstreaming' -> use 'Windows Media Player' and mms url
- # s['media_type'] == 'hstreaming' -> use 'Quicktime' and rtsp url
- # choose first/default media type and highest quality for now
- stream_ = None
- for s in streams: # find 300 - dsl1000mbit
- if s['quality'] == '300' and s['media_type'] == 'wstreaming':
- stream_ = s
- break
- for s in streams: # find veryhigh - dsl2000mbit
- if s['quality'] == 'veryhigh' and s['media_type'] == 'wstreaming': # 'hstreaming' - rtsp is not working
- stream_ = s
- break
- if stream_ is None:
- raise ExtractorError(u'No stream found.')
-
- media_link = self._download_webpage(stream_['video_url'], video_id,'Get stream URL')
-
- self.report_extraction(video_id)
- mobj = re.search(self._TITLE, html)
- if mobj is None:
- raise ExtractorError(u'Cannot extract title')
- title = unescapeHTML(mobj.group('title'))
-
- mobj = re.search(self._MMS_STREAM, media_link)
- if mobj is None:
- mobj = re.search(self._RTSP_STREAM, media_link)
- if mobj is None:
- raise ExtractorError(u'Cannot extract mms:// or rtsp:// URL')
- mms_url = mobj.group('video_url')
- mobj = re.search('(.*)[.](?P<ext>[^.]+)', mms_url)
- if mobj is None:
- raise ExtractorError(u'Cannot extract extension')
- ext = mobj.group('ext')
-
- return [{'id': video_id,
- 'url': mms_url,
- 'title': title,
- 'ext': ext
- }]
class TumblrIE(InfoExtractor):
_VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/((post)|(video))/(?P<id>\d*)/(.*?)'
'thumbnail': thumbnail_url,
}]
-class GametrailersIE(InfoExtractor):
- _VALID_URL = r'http://www.gametrailers.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
- video_id = mobj.group('id')
- video_type = mobj.group('type')
- webpage = self._download_webpage(url, video_id)
- if video_type == 'full-episodes':
- mgid_re = r'data-video="(?P<mgid>mgid:.*?)"'
- else:
- mgid_re = r'data-contentId=\'(?P<mgid>mgid:.*?)\''
- mgid = self._search_regex(mgid_re, webpage, u'mgid')
- data = compat_urllib_parse.urlencode({'uri': mgid, 'acceptMethods': 'fms'})
-
- info_page = self._download_webpage('http://www.gametrailers.com/feeds/mrss?' + data,
- video_id, u'Downloading video info')
- links_webpage = self._download_webpage('http://www.gametrailers.com/feeds/mediagen/?' + data,
- video_id, u'Downloading video urls info')
-
- self.report_extraction(video_id)
- info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
- <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
- <image>.*
- <url>(?P<thumb>.*?)</url>.*
- </image>'''
-
- m_info = re.search(info_re, info_page, re.VERBOSE|re.DOTALL)
- if m_info is None:
- raise ExtractorError(u'Unable to extract video info')
- video_title = m_info.group('title')
- video_description = m_info.group('description')
- video_thumb = m_info.group('thumb')
-
- m_urls = list(re.finditer(r'<src>(?P<url>.*)</src>', links_webpage))
- if not m_urls:
- raise ExtractorError(u'Unable to extract video url')
- # They are sorted from worst to best quality
- video_url = m_urls[-1].group('url')
-
- return {'url': video_url,
- 'id': video_id,
- 'title': video_title,
- # Videos are actually flv not mp4
- 'ext': 'flv',
- 'thumbnail': video_thumb,
- 'description': video_description,
- }
def gen_extractors():
""" Return a list of an instance of every supported extractor.