#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import import base64 import datetime import itertools import netrc import os import re import socket import time import email.utils import xml.etree.ElementTree import random import math import operator import hashlib import binascii import urllib from .utils import * from .extractor.common import InfoExtractor, SearchInfoExtractor from .extractor.dailymotion import DailymotionIE from .extractor.metacafe import MetacafeIE from .extractor.statigram import StatigramIE from .extractor.photobucket import PhotobucketIE from .extractor.youtube import YoutubeIE, YoutubePlaylistIE, YoutubeUserIE, YoutubeChannelIE class YahooIE(InfoExtractor): """Information extractor for screen.yahoo.com.""" _VALID_URL = r'http://screen\.yahoo\.com/.*?-(?P\d*?)\.html' def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: raise ExtractorError(u'Invalid URL: %s' % url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) m_id = re.search(r'YUI\.namespace\("Media"\)\.CONTENT_ID = "(?P.+?)";', webpage) if m_id is None: # TODO: Check which url parameters are required info_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;lmsoverride=1;outputformat=mrss;cb=974419660;id=%s;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id webpage = self._download_webpage(info_url, video_id, u'Downloading info webpage') info_re = r'''<!\[CDATA\[(?P<title>.*?)\]\]>.* .*?)\]\]>.* .*?)\ .*\]\]>.* https?://)?(?:(?:www|player)\.)?vimeo(?Ppro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?Pplay_redirect_hls\?clip_id=)?(?:videos?/)?(?P[0-9]+)' IE_NAME = u'vimeo' def _verify_video_password(self, url, video_id, webpage): password = self._downloader.params.get('password', None) if password is None: raise ExtractorError(u'This video is protected by a password, use the --password option') token = re.search(r'xsrft: \'(.*?)\'', webpage).group(1) data = compat_urllib_parse.urlencode({'password': password, 'token': token}) # I didn't manage to use the password with https if url.startswith('https'): pass_url = url.replace('https','http') else: pass_url = url password_request = compat_urllib_request.Request(pass_url+'/password', data) password_request.add_header('Content-Type', 'application/x-www-form-urlencoded') password_request.add_header('Cookie', 'xsrft=%s' % token) pass_web = self._download_webpage(password_request, video_id, u'Verifying the password', u'Wrong password') def _real_extract(self, url, new_video=True): # Extract ID from URL mobj = re.match(self._VALID_URL, url) if mobj is None: raise ExtractorError(u'Invalid URL: %s' % url) video_id = mobj.group('id') if not mobj.group('proto'): url = 'https://' + url if mobj.group('direct_link') or mobj.group('pro'): url = 'https://vimeo.com/' + video_id # Retrieve video webpage to extract further information request = compat_urllib_request.Request(url, None, std_headers) webpage = self._download_webpage(request, video_id) # Now we begin extracting as much information as we can from what we # retrieved. First we extract the information common to all extractors, # and latter we extract those that are Vimeo specific. 
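
        # Illustrative sketch only (an assumption inferred from the fields the
        # code below reads, not from any Vimeo documentation): the player
        # config JSON embedded in the page is expected to look roughly like
        #
        #   {
        #       "video": {
        #           "title": "...",
        #           "thumbnail": "http://...",
        #           "owner": {"name": "...", "url": "http://vimeo.com/..."},
        #           "files": {"<codec>": ["<quality>", ...]}
        #       },
        #       "request": {"signature": "...", "timestamp": 1234567890}
        #   }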
        self.report_extraction(video_id)

        # Extract the config JSON
        try:
            config = webpage.split(' = {config:')[1].split(',assets:')[0]
            config = json.loads(config)
        except:
            if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
                raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')
            if re.search('If so please provide the correct password.', webpage):
                self._verify_video_password(url, video_id, webpage)
                return self._real_extract(url)
            else:
                raise ExtractorError(u'Unable to extract info section')

        # Extract title
        video_title = config["video"]["title"]

        # Extract uploader and uploader_id
        video_uploader = config["video"]["owner"]["name"]
        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None

        # Extract video thumbnail
        video_thumbnail = config["video"]["thumbnail"]

        # Extract video description
        video_description = get_element_by_attribute("itemprop", "description", webpage)
        if video_description:
            video_description = clean_html(video_description)
        else:
            video_description = u''

        # Extract upload date
        video_upload_date = None
        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)

        # Vimeo specific: extract request signature and timestamp
        sig = config['request']['signature']
        timestamp = config['request']['timestamp']

        # Vimeo specific: extract video codec and quality information
        # First consider quality, then codecs, then take everything
        # TODO bind to format param
        codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
        files = {'hd': [], 'sd': [], 'other': []}
        for codec_name, codec_extension in codecs:
            if codec_name in config["video"]["files"]:
                if 'hd' in config["video"]["files"][codec_name]:
                    files['hd'].append((codec_name, codec_extension, 'hd'))
                elif 'sd' in config["video"]["files"][codec_name]:
                    files['sd'].append((codec_name, codec_extension, 'sd'))
                else:
                    files['other'].append((codec_name, codec_extension, config["video"]["files"][codec_name][0]))

        for quality in ('hd', 'sd', 'other'):
            if len(files[quality]) > 0:
                video_quality = files[quality][0][2]
                video_codec = files[quality][0][0]
                video_extension = files[quality][0][1]
                self.to_screen(u'%s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
                break
        else:
            raise ExtractorError(u'No known codec found')

        video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
            % (video_id, sig, timestamp, video_quality, video_codec.upper())

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': video_uploader,
            'uploader_id': video_uploader_id,
            'upload_date': video_upload_date,
            'title': video_title,
            'ext': video_extension,
            'thumbnail': video_thumbnail,
            'description': video_description,
        }]


class ArteTvIE(InfoExtractor):
    """arte.tv information extractor."""

    _VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*'
    _LIVE_URL = r'index-[0-9]+\.html$'

    IE_NAME = u'arte.tv'

    def fetch_webpage(self, url):
        request = compat_urllib_request.Request(url)
        try:
            self.report_download_webpage(url)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to retrieve video webpage: %s' % compat_str(err))
        except ValueError as err:
            raise ExtractorError(u'Invalid URL: %s' % url)
        return webpage

    def grep_webpage(self, url, regex, regexFlags, matchTuples):
        page = self.fetch_webpage(url)
        mobj = re.search(regex, page, regexFlags)
        info = {}

        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        for (i, key, err) in matchTuples:
            if mobj.group(i) is None:
                raise ExtractorError(err)
            else:
                info[key] = mobj.group(i)

        return info

    def extractLiveStream(self, url):
        video_lang = url.split('/')[-4]
        info = self.grep_webpage(
            url,
            r'src="(.*?/videothek_js.*?\.js)',
            0,
            [
                (1, 'url', u'Invalid URL: %s' % url)
            ]
        )
        http_host = url.split('/')[2]
        next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
        info = self.grep_webpage(
            next_url,
            r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
                '(http://.*?\.swf).*?'
                + '(rtmp://.*?)\'',
            re.DOTALL,
            [
                (1, 'path', u'could not extract video path: %s' % url),
                (2, 'player', u'could not extract video player: %s' % url),
                (3, 'url', u'could not extract video url: %s' % url)
            ]
        )
        video_url = u'%s/%s' % (info.get('url'), info.get('path'))

    def extractPlus7Stream(self, url):
        video_lang = url.split('/')[-3]
        info = self.grep_webpage(
            url,
            r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
            0,
            [
                (1, 'url', u'Invalid URL: %s' % url)
            ]
        )
        next_url = compat_urllib_parse.unquote(info.get('url'))
        info = self.grep_webpage(
            next_url,
            r'