gitweb @ CieloNegro.org - youtube-dl.git/commitdiff
Merge remote-tracking branch 'jaimeMF/f4m'
author    Philipp Hagemeister <phihag@phihag.de>
          Sat, 15 Feb 2014 14:32:13 +0000 (15:32 +0100)
committer Philipp Hagemeister <phihag@phihag.de>
          Sat, 15 Feb 2014 14:32:13 +0000 (15:32 +0100)
Conflicts:
youtube_dl/extractor/__init__.py

32 files changed:
test/test_utils.py
youtube_dl/extractor/__init__.py
youtube_dl/extractor/bloomberg.py
youtube_dl/extractor/breakcom.py
youtube_dl/extractor/brightcove.py
youtube_dl/extractor/chilloutzone.py
youtube_dl/extractor/cnn.py
youtube_dl/extractor/collegehumor.py
youtube_dl/extractor/common.py
youtube_dl/extractor/dotsub.py
youtube_dl/extractor/dropbox.py
youtube_dl/extractor/escapist.py
youtube_dl/extractor/exfm.py
youtube_dl/extractor/firsttv.py [new file with mode: 0644]
youtube_dl/extractor/freesound.py
youtube_dl/extractor/gametrailers.py
youtube_dl/extractor/googleplus.py
youtube_dl/extractor/howcast.py
youtube_dl/extractor/instagram.py
youtube_dl/extractor/jadorecettepub.py
youtube_dl/extractor/kontrtube.py [new file with mode: 0644]
youtube_dl/extractor/lifenews.py
youtube_dl/extractor/mtv.py
youtube_dl/extractor/ndr.py
youtube_dl/extractor/slideshare.py
youtube_dl/extractor/streamcz.py [new file with mode: 0644]
youtube_dl/extractor/vesti.py [new file with mode: 0644]
youtube_dl/extractor/xtube.py
youtube_dl/extractor/yahoo.py
youtube_dl/extractor/youtube.py
youtube_dl/utils.py
youtube_dl/version.py

diff --git a/test/test_utils.py b/test/test_utils.py
index c68e0e96844213f6626c950bd89487b74223593e..97c408ebf76c3028223d6e0f359e55f7b8f6b2e4 100644 (file)
@@ -127,6 +127,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(unified_strdate('8/7/2009'), '20090708')
         self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
         self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
+        self.assertEqual(unified_strdate('1968-12-10'), '19681210')
 
     def test_find_xpath_attr(self):
         testxml = u'''<root>
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index 97b53900c50924a47f17163dab063f09f7cb29f7..8715da7db50d8af6d1d0662468625e0cc60eeabb 100644 (file)
@@ -32,7 +32,10 @@ from .clipfish import ClipfishIE
 from .cliphunter import CliphunterIE
 from .clipsyndicate import ClipsyndicateIE
 from .cmt import CMTIE
-from .cnn import CNNIE
+from .cnn import (
+    CNNIE,
+    CNNBlogsIE,
+)
 from .collegehumor import CollegeHumorIE
 from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
 from .condenast import CondeNastIE
@@ -64,6 +67,7 @@ from .extremetube import ExtremeTubeIE
 from .facebook import FacebookIE
 from .faz import FazIE
 from .firstpost import FirstpostIE
+from .firsttv import FirstTVIE
 from .fktv import (
     FKTVIE,
     FKTVPosteckeIE,
@@ -115,6 +119,7 @@ from .keezmovies import KeezMoviesIE
 from .khanacademy import KhanAcademyIE
 from .kickstarter import KickStarterIE
 from .keek import KeekIE
+from .kontrtube import KontrTubeIE
 from .la7 import LA7IE
 from .lifenews import LifeNewsIE
 from .liveleak import LiveLeakIE
@@ -203,6 +208,7 @@ from .stanfordoc import StanfordOpenClassroomIE
 from .statigram import StatigramIE
 from .steam import SteamIE
 from .streamcloud import StreamcloudIE
+from .streamcz import StreamCZIE
 from .syfy import SyfyIE
 from .sztvhu import SztvHuIE
 from .teamcoco import TeamcocoIE
@@ -225,6 +231,7 @@ from .ustream import UstreamIE, UstreamChannelIE
 from .vbox7 import Vbox7IE
 from .veehd import VeeHDIE
 from .veoh import VeohIE
+from .vesti import VestiIE
 from .vevo import VevoIE
 from .vice import ViceIE
 from .viddler import ViddlerIE
diff --git a/youtube_dl/extractor/bloomberg.py b/youtube_dl/extractor/bloomberg.py
index df2cff81c603b5d85d0f6c82a0f36af509002301..2415ce4030521940a066af58796e438f0adc955a 100644 (file)
@@ -24,5 +24,7 @@ class BloombergIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         name = mobj.group('name')
         webpage = self._download_webpage(url, name)
-        ooyala_url = self._twitter_search_player(webpage)
-        return self.url_result(ooyala_url, OoyalaIE.ie_key())
+        embed_code = self._search_regex(
+            r'<source src="https?://[^/]+/[^/]+/[^/]+/([^/]+)', webpage,
+            'embed code')
+        return OoyalaIE._build_url_result(embed_code)
diff --git a/youtube_dl/extractor/breakcom.py b/youtube_dl/extractor/breakcom.py
index 53a898de3707ce9a2f235d95e1d7fa0be58edb20..8ec6dda490c9c886463502c795bce4b361b9c326 100644 (file)
@@ -1,18 +1,20 @@
+from __future__ import unicode_literals
+
 import re
 import json
 
 from .common import InfoExtractor
-from ..utils import determine_ext
 
 
 class BreakIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?break\.com/video/([^/]+)'
+    _VALID_URL = r'http://(?:www\.)?break\.com/video/([^/]+)'
     _TEST = {
-        u'url': u'http://www.break.com/video/when-girls-act-like-guys-2468056',
-        u'file': u'2468056.mp4',
-        u'md5': u'a3513fb1547fba4fb6cfac1bffc6c46b',
-        u'info_dict': {
-            u"title": u"When Girls Act Like D-Bags"
+        'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
+        'md5': 'a3513fb1547fba4fb6cfac1bffc6c46b',
+        'info_dict': {
+            'id': '2468056',
+            'ext': 'mp4',
+            'title': 'When Girls Act Like D-Bags',
         }
     }
 
@@ -22,17 +24,16 @@ class BreakIE(InfoExtractor):
         embed_url = 'http://www.break.com/embed/%s' % video_id
         webpage = self._download_webpage(embed_url, video_id)
         info_json = self._search_regex(r'var embedVars = ({.*?});', webpage,
-                                       u'info json', flags=re.DOTALL)
+                                       'info json', flags=re.DOTALL)
         info = json.loads(info_json)
         video_url = info['videoUri']
         m_youtube = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', video_url)
         if m_youtube is not None:
             return self.url_result(m_youtube.group(1), 'Youtube')
         final_url = video_url + '?' + info['AuthToken']
-        return [{
-            'id':        video_id,
-            'url':       final_url,
-            'ext':       determine_ext(final_url),
-            'title':     info['contentName'],
+        return {
+            'id': video_id,
+            'url': final_url,
+            'title': info['contentName'],
             'thumbnail': info['thumbUri'],
-        }]
+        }
diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py
index 031fe385d906dd8f4a53f8440955d325e5b0add1..83eec84d3cd446b75854accd8dd8c2c754ba4349 100644 (file)
@@ -17,6 +17,7 @@ from ..utils import (
 
     ExtractorError,
     unsmuggle_url,
+    unescapeHTML,
 )
 
 
@@ -139,7 +140,7 @@ class BrightcoveIE(InfoExtractor):
 
         url_m = re.search(r'<meta\s+property="og:video"\s+content="(http://c.brightcove.com/[^"]+)"', webpage)
         if url_m:
-            return [url_m.group(1)]
+            return [unescapeHTML(url_m.group(1))]
 
         matches = re.findall(
             r'''(?sx)<object
diff --git a/youtube_dl/extractor/chilloutzone.py b/youtube_dl/extractor/chilloutzone.py
index 524f06d7ad9e92cf981c1bf4efc31a84ee39d293..02d5ba52713f27412f85989749fde91970e48e36 100644 (file)
@@ -42,7 +42,7 @@ class ChilloutzoneIE(InfoExtractor):
             'id': '85523671',
             'ext': 'mp4',
             'title': 'The Sunday Times - Icons',
-            'description': 'md5:3e5e8e839f076a637c6b9406c8f25c4c',
+            'description': 'md5:3e1c0dc6047498d6728dcdaad0891762',
             'uploader': 'Us',
             'uploader_id': 'usfilms',
             'upload_date': '20140131'
diff --git a/youtube_dl/extractor/cnn.py b/youtube_dl/extractor/cnn.py
index 80bf59ade7c3ff45dc04be324020abbaf69ee4e8..b32cb898010a0ad0e02e12f3b3a55c3769cc3979 100644 (file)
@@ -6,6 +6,7 @@ from .common import InfoExtractor
 from ..utils import (
     int_or_none,
     parse_duration,
+    url_basename,
 )
 
 
@@ -98,3 +99,28 @@ class CNNIE(InfoExtractor):
             'duration': duration,
             'upload_date': upload_date,
         }
+
+
+class CNNBlogsIE(InfoExtractor):
+    _VALID_URL = r'https?://[^\.]+\.blogs\.cnn\.com/.+'
+    _TEST = {
+        'url': 'http://reliablesources.blogs.cnn.com/2014/02/09/criminalizing-journalism/',
+        'md5': '3e56f97b0b6ffb4b79f4ea0749551084',
+        'info_dict': {
+            'id': 'bestoftv/2014/02/09/criminalizing-journalism.cnn',
+            'ext': 'mp4',
+            'title': 'Criminalizing journalism?',
+            'description': 'Glenn Greenwald responds to comments made this week on Capitol Hill that journalists could be criminal accessories.',
+            'upload_date': '20140209',
+        },
+        'add_ie': ['CNN'],
+    }
+
+    def _real_extract(self, url):
+        webpage = self._download_webpage(url, url_basename(url))
+        cnn_url = self._html_search_regex(r'data-url="(.+?)"', webpage, 'cnn url')
+        return {
+            '_type': 'url',
+            'url': cnn_url,
+            'ie_key': CNNIE.ie_key(),
+        }
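The raw '_type': 'url' dict returned above does the same job as the url_result helper used elsewhere in this commit (e.g. in BreakIE and ExfmIE); a minimal sketch of the same delegation written with the helper, assuming the identical data-url markup:

    def _real_extract(self, url):
        webpage = self._download_webpage(url, url_basename(url))
        # The blog page embeds the clip via a data-url attribute that points
        # at a regular CNN video page, so extraction is handed off to CNNIE.
        cnn_url = self._html_search_regex(r'data-url="(.+?)"', webpage, 'cnn url')
        return self.url_result(cnn_url, CNNIE.ie_key())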
diff --git a/youtube_dl/extractor/collegehumor.py b/youtube_dl/extractor/collegehumor.py
index ad7cf5e5f04329a206fc82ff7000bed71693df35..10c925dfe1c7d44d7d1f6abe6a72b79d12230abf 100644 (file)
@@ -42,7 +42,7 @@ class CollegeHumorIE(InfoExtractor):
             'title': 'Funny Dogs Protecting Babies Compilation 2014 [NEW HD]',
             'uploader': 'Funnyplox TV',
             'uploader_id': 'funnyploxtv',
-            'description': 'md5:11812366244110c3523968aa74f02521',
+            'description': 'md5:7ded37421526d54afdf005e25bc2b7a3',
             'upload_date': '20140128',
         },
         'params': {
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 2c0c75604b96cfd08a283c0d93ef36f85b525922..84fca8ba0b2577696877c117a13fcc0a5ce40735 100644 (file)
@@ -271,8 +271,11 @@ class InfoExtractor(object):
 
     def _download_json(self, url_or_request, video_id,
                        note=u'Downloading JSON metadata',
-                       errnote=u'Unable to download JSON metadata'):
+                       errnote=u'Unable to download JSON metadata',
+                       transform_source=None):
         json_string = self._download_webpage(url_or_request, video_id, note, errnote)
+        if transform_source:
+            json_string = transform_source(json_string)
         try:
             return json.loads(json_string)
         except ValueError as ve:
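The transform_source hook added to _download_json above lets an extractor repair an almost-JSON payload before it is parsed; the Escapist change further down in this commit uses it to turn a single-quoted JavaScript object literal into valid JSON. A minimal sketch of the call pattern (config_url and video_id are illustrative names, not from any particular extractor):

    # Normalise single quotes to double quotes so that the json.loads()
    # call inside _download_json succeeds.
    config = self._download_json(
        config_url, video_id,
        note='Downloading player configuration',
        errnote='Unable to download player configuration',
        transform_source=lambda s: s.replace("'", '"'))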
diff --git a/youtube_dl/extractor/dotsub.py b/youtube_dl/extractor/dotsub.py
index 0ee9a684eb4c66907634f9d4b603a46beec5a357..5ae0ad5b65cdf12a896a573db78e23494864fd23 100644 (file)
@@ -1,41 +1,42 @@
+from __future__ import unicode_literals
+
 import re
-import json
 import time
 
 from .common import InfoExtractor
 
 
 class DotsubIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?dotsub\.com/view/([^/]+)'
+    _VALID_URL = r'http://(?:www\.)?dotsub\.com/view/(?P<id>[^/]+)'
     _TEST = {
-        u'url': u'http://dotsub.com/view/aed3b8b2-1889-4df5-ae63-ad85f5572f27',
-        u'file': u'aed3b8b2-1889-4df5-ae63-ad85f5572f27.flv',
-        u'md5': u'0914d4d69605090f623b7ac329fea66e',
-        u'info_dict': {
-            u"title": u"Pyramids of Waste (2010), AKA The Lightbulb Conspiracy - Planned obsolescence documentary",
-            u"uploader": u"4v4l0n42",
-            u'description': u'Pyramids of Waste (2010) also known as "The lightbulb conspiracy" is a documentary about how our economic system based on consumerism  and planned obsolescence is breaking our planet down.\r\n\r\nSolutions to this can be found at:\r\nhttp://robotswillstealyourjob.com\r\nhttp://www.federicopistono.org\r\n\r\nhttp://opensourceecology.org\r\nhttp://thezeitgeistmovement.com',
-            u'thumbnail': u'http://dotsub.com/media/aed3b8b2-1889-4df5-ae63-ad85f5572f27/p',
-            u'upload_date': u'20101213',
+        'url': 'http://dotsub.com/view/aed3b8b2-1889-4df5-ae63-ad85f5572f27',
+        'md5': '0914d4d69605090f623b7ac329fea66e',
+        'info_dict': {
+            'id': 'aed3b8b2-1889-4df5-ae63-ad85f5572f27',
+            'ext': 'flv',
+            'title': 'Pyramids of Waste (2010), AKA The Lightbulb Conspiracy - Planned obsolescence documentary',
+            'uploader': '4v4l0n42',
+            'description': 'Pyramids of Waste (2010) also known as "The lightbulb conspiracy" is a documentary about how our economic system based on consumerism  and planned obsolescence is breaking our planet down.\r\n\r\nSolutions to this can be found at:\r\nhttp://robotswillstealyourjob.com\r\nhttp://www.federicopistono.org\r\n\r\nhttp://opensourceecology.org\r\nhttp://thezeitgeistmovement.com',
+            'thumbnail': 'http://dotsub.com/media/aed3b8b2-1889-4df5-ae63-ad85f5572f27/p',
+            'upload_date': '20101213',
         }
     }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
-        info_url = "https://dotsub.com/api/media/%s/metadata" %(video_id)
-        webpage = self._download_webpage(info_url, video_id)
-        info = json.loads(webpage)
+        video_id = mobj.group('id')
+        info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
+        info = self._download_json(info_url, video_id)
         date = time.gmtime(info['dateCreated']/1000) # The timestamp is in miliseconds
 
-        return [{
-            'id':          video_id,
-            'url':         info['mediaURI'],
-            'ext':         'flv',
-            'title':       info['title'],
-            'thumbnail':   info['screenshotURI'],
+        return {
+            'id': video_id,
+            'url': info['mediaURI'],
+            'ext': 'flv',
+            'title': info['title'],
+            'thumbnail': info['screenshotURI'],
             'description': info['description'],
-            'uploader':    info['user'],
-            'view_count':  info['numberOfViews'],
-            'upload_date': u'%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday),
-        }]
+            'uploader': info['user'],
+            'view_count': info['numberOfViews'],
+            'upload_date': '%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday),
+        }
diff --git a/youtube_dl/extractor/dropbox.py b/youtube_dl/extractor/dropbox.py
index d74981eead1db6cf8b41b4473c36d7a736169c5e..41208c97691aafc1c2c96ed06d4a326bca8886a6 100644 (file)
@@ -10,11 +10,12 @@ from .common import InfoExtractor
 class DropboxIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/s/(?P<id>[a-zA-Z0-9]{15})/(?P<title>[^?#]*)'
     _TEST = {
-        'url': 'https://www.dropbox.com/s/mcnzehi9wo55th4/20131219_085616.mp4',
-        'file': 'mcnzehi9wo55th4.mp4',
-        'md5': 'f6d65b1b326e82fd7ab7720bea3dacae',
+        'url': 'https://www.dropbox.com/s/0qr9sai2veej4f8/THE_DOCTOR_GAMES.mp4',
+        'md5': '8ae17c51172fb7f93bdd6a214cc8c896',
         'info_dict': {
-            'title': '20131219_085616'
+            'id': '0qr9sai2veej4f8',
+            'ext': 'mp4',
+            'title': 'THE_DOCTOR_GAMES'
         }
     }
 
diff --git a/youtube_dl/extractor/escapist.py b/youtube_dl/extractor/escapist.py
index b1242f6bc457a41a9c8413eb851671acd05cc8c0..272dfe1f643208a31635dade0e561c8eb009aab7 100644 (file)
@@ -1,9 +1,9 @@
-import json
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 from ..utils import (
-    compat_str,
     compat_urllib_parse,
 
     ExtractorError,
@@ -11,70 +11,68 @@ from ..utils import (
 
 
 class EscapistIE(InfoExtractor):
-    _VALID_URL = r'^https?://?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
+    _VALID_URL = r'^https?://?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<id>[0-9]+)-'
     _TEST = {
-        u'url': u'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',
-        u'file': u'6618-Breaking-Down-Baldurs-Gate.mp4',
-        u'md5': u'ab3a706c681efca53f0a35f1415cf0d1',
-        u'info_dict': {
-            u"description": u"Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.", 
-            u"uploader": u"the-escapist-presents", 
-            u"title": u"Breaking Down Baldur's Gate"
+        'url': 'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',
+        'md5': 'ab3a706c681efca53f0a35f1415cf0d1',
+        'info_dict': {
+            'id': '6618',
+            'ext': 'mp4',
+            'description': "Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.",
+            'uploader': 'the-escapist-presents',
+            'title': "Breaking Down Baldur's Gate",
         }
     }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         showName = mobj.group('showname')
-        videoId = mobj.group('episode')
+        video_id = mobj.group('id')
 
-        self.report_extraction(videoId)
-        webpage = self._download_webpage(url, videoId)
+        self.report_extraction(video_id)
+        webpage = self._download_webpage(url, video_id)
 
         videoDesc = self._html_search_regex(
             r'<meta name="description" content="([^"]*)"',
-            webpage, u'description', fatal=False)
+            webpage, 'description', fatal=False)
 
         playerUrl = self._og_search_video_url(webpage, name=u'player URL')
 
         title = self._html_search_regex(
             r'<meta name="title" content="([^"]*)"',
-            webpage, u'title').split(' : ')[-1]
+            webpage, 'title').split(' : ')[-1]
 
-        configUrl = self._search_regex('config=(.*)$', playerUrl, u'config URL')
+        configUrl = self._search_regex('config=(.*)$', playerUrl, 'config URL')
         configUrl = compat_urllib_parse.unquote(configUrl)
 
         formats = []
 
-        def _add_format(name, cfgurl):
-            configJSON = self._download_webpage(
-                cfgurl, videoId,
-                u'Downloading ' + name + ' configuration',
-                u'Unable to download ' + name + ' configuration')
-
-            # Technically, it's JavaScript, not JSON
-            configJSON = configJSON.replace("'", '"')
+        def _add_format(name, cfgurl, quality):
+            config = self._download_json(
+                cfgurl, video_id,
+                'Downloading ' + name + ' configuration',
+                'Unable to download ' + name + ' configuration',
+                transform_source=lambda s: s.replace("'", '"'))
 
-            try:
-                config = json.loads(configJSON)
-            except (ValueError,) as err:
-                raise ExtractorError(u'Invalid JSON in configuration file: ' + compat_str(err))
             playlist = config['playlist']
             formats.append({
                 'url': playlist[1]['url'],
                 'format_id': name,
+                'quality': quality,
             })
 
-        _add_format(u'normal', configUrl)
+        _add_format('normal', configUrl, quality=0)
         hq_url = (configUrl +
                   ('&hq=1' if '?' in configUrl else configUrl + '?hq=1'))
         try:
-            _add_format(u'hq', hq_url)
+            _add_format('hq', hq_url, quality=1)
         except ExtractorError:
             pass  # That's fine, we'll just use normal quality
 
+        self._sort_formats(formats)
+
         return {
-            'id': videoId,
+            'id': video_id,
             'formats': formats,
             'uploader': showName,
             'title': title,
diff --git a/youtube_dl/extractor/exfm.py b/youtube_dl/extractor/exfm.py
index 682901d16227e088e203bd01656db21cc2f70dda..4de02aee9b2ef46d54e43964560410b82ac3aff9 100644 (file)
@@ -1,56 +1,58 @@
+from __future__ import unicode_literals
+
 import re
-import json
 
 from .common import InfoExtractor
 
 
 class ExfmIE(InfoExtractor):
-    IE_NAME = u'exfm'
-    IE_DESC = u'ex.fm'
-    _VALID_URL = r'(?:http://)?(?:www\.)?ex\.fm/song/([^/]+)'
-    _SOUNDCLOUD_URL = r'(?:http://)?(?:www\.)?api\.soundcloud\.com/tracks/([^/]+)/stream'
+    IE_NAME = 'exfm'
+    IE_DESC = 'ex.fm'
+    _VALID_URL = r'http://(?:www\.)?ex\.fm/song/(?P<id>[^/]+)'
+    _SOUNDCLOUD_URL = r'http://(?:www\.)?api\.soundcloud\.com/tracks/([^/]+)/stream'
     _TESTS = [
         {
-            u'url': u'http://ex.fm/song/eh359',
-            u'file': u'44216187.mp3',
-            u'md5': u'e45513df5631e6d760970b14cc0c11e7',
-            u'info_dict': {
-                u"title": u"Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive",
-                u"uploader": u"deadjournalist",
-                u'upload_date': u'20120424',
-                u'description': u'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive',
+            'url': 'http://ex.fm/song/eh359',
+            'md5': 'e45513df5631e6d760970b14cc0c11e7',
+            'info_dict': {
+                'id': '44216187',
+                'ext': 'mp3',
+                'title': 'Test House "Love Is Not Enough" (Extended Mix) DeadJournalist Exclusive',
+                'uploader': 'deadjournalist',
+                'upload_date': '20120424',
+                'description': 'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive',
             },
-            u'note': u'Soundcloud song',
-            u'skip': u'The site is down too often',
+            'note': 'Soundcloud song',
+            'skip': 'The site is down too often',
         },
         {
-            u'url': u'http://ex.fm/song/wddt8',
-            u'file': u'wddt8.mp3',
-            u'md5': u'966bd70741ac5b8570d8e45bfaed3643',
-            u'info_dict': {
-                u'title': u'Safe and Sound',
-                u'uploader': u'Capital Cities',
+            'url': 'http://ex.fm/song/wddt8',
+            'md5': '966bd70741ac5b8570d8e45bfaed3643',
+            'info_dict': {
+                'id': 'wddt8',
+                'ext': 'mp3',
+                'title': 'Safe and Sound',
+                'uploader': 'Capital Cities',
             },
-            u'skip': u'The site is down too often',
+            'skip': 'The site is down too often',
         },
     ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        song_id = mobj.group(1)
-        info_url = "http://ex.fm/api/v3/song/%s" %(song_id)
-        webpage = self._download_webpage(info_url, song_id)
-        info = json.loads(webpage)
-        song_url = info['song']['url']
+        song_id = mobj.group('id')
+        info_url = "http://ex.fm/api/v3/song/%s" % song_id
+        info = self._download_json(info_url, song_id)['song']
+        song_url = info['url']
         if re.match(self._SOUNDCLOUD_URL, song_url) is not None:
             self.to_screen('Soundcloud song detected')
-            return self.url_result(song_url.replace('/stream',''), 'Soundcloud')
-        return [{
-            'id':          song_id,
-            'url':         song_url,
-            'ext':         'mp3',
-            'title':       info['song']['title'],
-            'thumbnail':   info['song']['image']['large'],
-            'uploader':    info['song']['artist'],
-            'view_count':  info['song']['loved_count'],
-        }]
+            return self.url_result(song_url.replace('/stream', ''), 'Soundcloud')
+        return {
+            'id': song_id,
+            'url': song_url,
+            'ext': 'mp3',
+            'title': info['title'],
+            'thumbnail': info['image']['large'],
+            'uploader': info['artist'],
+            'view_count': info['loved_count'],
+        }
diff --git a/youtube_dl/extractor/firsttv.py b/youtube_dl/extractor/firsttv.py
new file mode 100644 (file)
index 0000000..c2e987f
--- /dev/null
@@ -0,0 +1,60 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class FirstTVIE(InfoExtractor):
+    IE_NAME = 'firsttv'
+    IE_DESC = 'Видеоархив - Первый канал'
+    _VALID_URL = r'http://(?:www\.)?1tv\.ru/videoarchive/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://www.1tv.ru/videoarchive/73390',
+        'md5': '3de6390cf0cca4a5eae1d1d83895e5ad',
+        'info_dict': {
+            'id': '73390',
+            'ext': 'mp4',
+            'title': 'Олимпийские канатные дороги',
+            'description': 'md5:cc730d2bf4215463e37fff6a1e277b13',
+            'thumbnail': 'http://img1.1tv.ru/imgsize640x360/PR20140210114657.JPG',
+            'duration': 149,
+        },
+        'skip': 'Only works from Russia',
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id, 'Downloading page')
+
+        video_url = self._html_search_regex(
+            r'''(?s)jwplayer\('flashvideoportal_1'\)\.setup\({.*?'file': '([^']+)'.*?}\);''', webpage, 'video URL')
+
+        title = self._html_search_regex(
+            r'<div class="tv_translation">\s*<h1><a href="[^"]+">([^<]*)</a>', webpage, 'title')
+        description = self._html_search_regex(
+            r'<div class="descr">\s*<div>&nbsp;</div>\s*<p>([^<]*)</p></div>', webpage, 'description', fatal=False)
+
+        thumbnail = self._og_search_thumbnail(webpage)
+        duration = self._og_search_property('video:duration', webpage, 'video duration', fatal=False)
+
+        like_count = self._html_search_regex(r'title="Понравилось".*?/></label> \[(\d+)\]',
+            webpage, 'like count', fatal=False)
+        dislike_count = self._html_search_regex(r'title="Не понравилось".*?/></label> \[(\d+)\]',
+            webpage, 'dislike count', fatal=False)
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'thumbnail': thumbnail,
+            'title': title,
+            'description': description,
+            'duration': int_or_none(duration),
+            'like_count': int_or_none(like_count),
+            'dislike_count': int_or_none(dislike_count),
+        }
\ No newline at end of file
diff --git a/youtube_dl/extractor/freesound.py b/youtube_dl/extractor/freesound.py
index de14b12e5da28b61c9c23fc568c4a3d497e7e24a..5ff62af2a33d1743709bdb076dc0c80be0e3156b 100644 (file)
@@ -1,18 +1,21 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
-from ..utils import determine_ext
+
 
 class FreesoundIE(InfoExtractor):
-    _VALID_URL = r'(?:https?://)?(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)'
+    _VALID_URL = r'https?://(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)'
     _TEST = {
-        u'url': u'http://www.freesound.org/people/miklovan/sounds/194503/',
-        u'file': u'194503.mp3',
-        u'md5': u'12280ceb42c81f19a515c745eae07650',
-        u'info_dict': {
-            u"title": u"gulls in the city.wav",
-            u"uploader" : u"miklovan",
-            u'description': u'the sounds of seagulls in the city',
+        'url': 'http://www.freesound.org/people/miklovan/sounds/194503/',
+        'md5': '12280ceb42c81f19a515c745eae07650',
+        'info_dict': {
+            'id': '194503',
+            'ext': 'mp3',
+            'title': 'gulls in the city.wav',
+            'uploader': 'miklovan',
+            'description': 'the sounds of seagulls in the city',
         }
     }
 
@@ -20,17 +23,17 @@ class FreesoundIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         music_id = mobj.group('id')
         webpage = self._download_webpage(url, music_id)
-        title = self._html_search_regex(r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>',
-                                webpage, 'music title', flags=re.DOTALL)
-        music_url = self._og_search_property('audio', webpage, 'music url')
-        description = self._html_search_regex(r'<div id="sound_description">(.*?)</div>',
-                                webpage, 'description', fatal=False, flags=re.DOTALL)
+        title = self._html_search_regex(
+            r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>',
+            webpage, 'music title', flags=re.DOTALL)
+        description = self._html_search_regex(
+            r'<div id="sound_description">(.*?)</div>', webpage, 'description',
+            fatal=False, flags=re.DOTALL)
 
-        return [{
-            'id':       music_id,
-            'title':    title,            
-            'url':      music_url,
+        return {
+            'id': music_id,
+            'title': title,
+            'url': self._og_search_property('audio', webpage, 'music url'),
             'uploader': self._og_search_property('audio:artist', webpage, 'music uploader'),
-            'ext':      determine_ext(music_url),
             'description': description,
-        }]
+        }
diff --git a/youtube_dl/extractor/gametrailers.py b/youtube_dl/extractor/gametrailers.py
index 66b3b50d46b4224b4f79dedbd98aad8d8e19bcac..a6ab795aef1bab4a56b2655515983aed35886a77 100644 (file)
@@ -7,10 +7,11 @@ class GametrailersIE(MTVServicesInfoExtractor):
     _VALID_URL = r'http://www\.gametrailers\.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'
     _TEST = {
         'url': 'http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer',
-        'file': '70e9a5d7-cf25-4a10-9104-6f3e7342ae0d.mp4',
         'md5': '4c8e67681a0ea7ec241e8c09b3ea8cf7',
         'info_dict': {
-            'title': 'Mirror\'s Edge 2|E3 2013: Debut Trailer',
+            'id': '70e9a5d7-cf25-4a10-9104-6f3e7342ae0d',
+            'ext': 'mp4',
+            'title': 'E3 2013: Debut Trailer',
             'description': 'Faith is back!  Check out the World Premiere trailer for Mirror\'s Edge 2 straight from the EA Press Conference at E3 2013!',
         },
     }
diff --git a/youtube_dl/extractor/googleplus.py b/youtube_dl/extractor/googleplus.py
index 2570746b2047a1d1ae0a60b48970b1414f168e40..cc29a7e5df2059096afe89ecf7175f317a755e94 100644 (file)
@@ -1,4 +1,5 @@
 # coding: utf-8
+from __future__ import unicode_literals
 
 import datetime
 import re
@@ -10,32 +11,28 @@ from ..utils import (
 
 
 class GooglePlusIE(InfoExtractor):
-    IE_DESC = u'Google Plus'
-    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
-    IE_NAME = u'plus.google'
+    IE_DESC = 'Google Plus'
+    _VALID_URL = r'https://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
+    IE_NAME = 'plus.google'
     _TEST = {
-        u"url": u"https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH",
-        u"file": u"ZButuJc6CtH.flv",
-        u"info_dict": {
-            u"upload_date": u"20120613",
-            u"uploader": u"井上ヨシマサ",
-            u"title": u"嘆きの天使 降臨"
+        'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
+        'info_dict': {
+            'id': 'ZButuJc6CtH',
+            'ext': 'flv',
+            'upload_date': '20120613',
+            'uploader': '井上ヨシマサ',
+            'title': '嘆きの天使 降臨',
         }
     }
 
     def _real_extract(self, url):
         # Extract id from URL
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
 
-        post_url = mobj.group(0)
-        video_id = mobj.group(1)
-
-        video_extension = 'flv'
+        video_id = mobj.group('id')
 
         # Step 1, Retrieve post webpage to extract further information
-        webpage = self._download_webpage(post_url, video_id, u'Downloading entry webpage')
+        webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
 
         self.report_extraction(video_id)
 
@@ -43,7 +40,7 @@ class GooglePlusIE(InfoExtractor):
         upload_date = self._html_search_regex(
             r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                     ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
-            webpage, u'upload date', fatal=False, flags=re.VERBOSE)
+            webpage, 'upload date', fatal=False, flags=re.VERBOSE)
         if upload_date:
             # Convert timestring to a format suitable for filename
             upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
@@ -51,28 +48,27 @@ class GooglePlusIE(InfoExtractor):
 
         # Extract uploader
         uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
-            webpage, u'uploader', fatal=False)
+            webpage, 'uploader', fatal=False)
 
         # Extract title
         # Get the first line for title
         video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
-            webpage, 'title', default=u'NA')
+            webpage, 'title', default='NA')
 
         # Step 2, Simulate clicking the image box to launch video
         DOMAIN = 'https://plus.google.com/'
         video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
-            webpage, u'video page URL')
+            webpage, 'video page URL')
         if not video_page.startswith(DOMAIN):
             video_page = DOMAIN + video_page
 
-        webpage = self._download_webpage(video_page, video_id, u'Downloading video page')
+        webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
 
-        # Extract video links on video page
-        """Extract video links of all sizes"""
+        # Extract video links all sizes
         pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
         mobj = re.findall(pattern, webpage)
         if len(mobj) == 0:
-            raise ExtractorError(u'Unable to extract video links')
+            raise ExtractorError('Unable to extract video links')
 
         # Sort in resolution
         links = sorted(mobj)
@@ -87,12 +83,11 @@ class GooglePlusIE(InfoExtractor):
         except AttributeError: # Python 3
             video_url = bytes(video_url, 'ascii').decode('unicode-escape')
 
-
-        return [{
-            'id':       video_id,
-            'url':      video_url,
+        return {
+            'id': video_id,
+            'url': video_url,
             'uploader': uploader,
-            'upload_date':  upload_date,
-            'title':    video_title,
-            'ext':      video_extension,
-        }]
+            'upload_date': upload_date,
+            'title': video_title,
+            'ext': 'flv',
+        }
diff --git a/youtube_dl/extractor/howcast.py b/youtube_dl/extractor/howcast.py
index bafc5826f680353af40b820609a543192ac73d17..6ae04782c1aabb27ee6973819810f1e6f763b8b0 100644 (file)
@@ -1,17 +1,20 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 
 
 class HowcastIE(InfoExtractor):
-    _VALID_URL = r'(?:https?://)?(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
     _TEST = {
-        u'url': u'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
-        u'file': u'390161.mp4',
-        u'md5': u'8b743df908c42f60cf6496586c7f12c3',
-        u'info_dict': {
-            u"description": u"The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here's the proper way to tie a square knot.", 
-            u"title": u"How to Tie a Square Knot Properly"
+        'url': 'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
+        'md5': '8b743df908c42f60cf6496586c7f12c3',
+        'info_dict': {
+            'id': '390161',
+            'ext': 'mp4',
+            'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.', 
+            'title': 'How to Tie a Square Knot Properly',
         }
     }
 
@@ -24,22 +27,15 @@ class HowcastIE(InfoExtractor):
         self.report_extraction(video_id)
 
         video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
-            webpage, u'video URL')
-
-        video_title = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'',
-            webpage, u'title')
+            webpage, 'video URL')
 
         video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
-            webpage, u'description', fatal=False)
+            webpage, 'description', fatal=False)
 
-        thumbnail = self._html_search_regex(r'<meta content=\'(.+?)\' property=\'og:image\'',
-            webpage, u'thumbnail', fatal=False)
-
-        return [{
-            'id':       video_id,
-            'url':      video_url,
-            'ext':      'mp4',
-            'title':    video_title,
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': self._og_search_title(webpage),
             'description': video_description,
-            'thumbnail': thumbnail,
-        }]
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
diff --git a/youtube_dl/extractor/instagram.py b/youtube_dl/extractor/instagram.py
index 660573d022d267b1dfbf0d7274083f5ae47e9953..63141af272ac077ed97dcd5baf4c5a0dcb7d3b47 100644 (file)
@@ -1,35 +1,39 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 
+
 class InstagramIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?instagram\.com/p/(.*?)/'
+    _VALID_URL = r'http://instagram\.com/p/(?P<id>.*?)/'
     _TEST = {
-        u'url': u'http://instagram.com/p/aye83DjauH/?foo=bar#abc',
-        u'file': u'aye83DjauH.mp4',
-        u'md5': u'0d2da106a9d2631273e192b372806516',
-        u'info_dict': {
-            u"uploader_id": u"naomipq", 
-            u"title": u"Video by naomipq",
-            u'description': u'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
+        'url': 'http://instagram.com/p/aye83DjauH/?foo=bar#abc',
+        'md5': '0d2da106a9d2631273e192b372806516',
+        'info_dict': {
+            'id': 'aye83DjauH',
+            'ext': 'mp4',
+            'uploader_id': 'naomipq',
+            'title': 'Video by naomipq',
+            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
         }
     }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
+        video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
         uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
-            webpage, u'uploader id', fatal=False)
-        desc = self._search_regex(r'"caption":"(.*?)"', webpage, u'description',
+            webpage, 'uploader id', fatal=False)
+        desc = self._search_regex(r'"caption":"(.*?)"', webpage, 'description',
             fatal=False)
 
-        return [{
-            'id':        video_id,
-            'url':       self._og_search_video_url(webpage, secure=False),
-            'ext':       'mp4',
-            'title':     u'Video by %s' % uploader_id,
+        return {
+            'id': video_id,
+            'url': self._og_search_video_url(webpage, secure=False),
+            'ext': 'mp4',
+            'title': 'Video by %s' % uploader_id,
             'thumbnail': self._og_search_thumbnail(webpage),
-            'uploader_id' : uploader_id,
+            'uploader_id': uploader_id,
             'description': desc,
-        }]
+        }
diff --git a/youtube_dl/extractor/jadorecettepub.py b/youtube_dl/extractor/jadorecettepub.py
index d918fff81cdf329c418d6cd65f176d7c08036311..ace08769bd7671619db448696a016d08fd453e67 100644 (file)
@@ -2,7 +2,6 @@
 
 from __future__ import unicode_literals
 
-import json
 import re
 
 from .common import InfoExtractor
diff --git a/youtube_dl/extractor/kontrtube.py b/youtube_dl/extractor/kontrtube.py
new file mode 100644 (file)
index 0000000..1b45b67
--- /dev/null
@@ -0,0 +1,66 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class KontrTubeIE(InfoExtractor):
+    IE_NAME = 'kontrtube'
+    IE_DESC = 'KontrTube.ru - Труба зовёт'
+    _VALID_URL = r'http://(?:www\.)?kontrtube\.ru/videos/(?P<id>\d+)/.+'
+
+    _TEST = {
+        'url': 'http://www.kontrtube.ru/videos/2678/nad-olimpiyskoy-derevney-v-sochi-podnyat-rossiyskiy-flag/',
+        'md5': '975a991a4926c9a85f383a736a2e6b80',
+        'info_dict': {
+            'id': '2678',
+            'ext': 'mp4',
+            'title': 'Над олимпийской деревней в Сочи поднят российский флаг',
+            'description': 'md5:80edc4c613d5887ae8ccf1d59432be41',
+            'thumbnail': 'http://www.kontrtube.ru/contents/videos_screenshots/2000/2678/preview.mp4.jpg',
+            'duration': 270,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id, 'Downloading page')
+
+        video_url = self._html_search_regex(r"video_url: '(.+?)/?',", webpage, 'video URL')
+        thumbnail = self._html_search_regex(r"preview_url: '(.+?)/?',", webpage, 'video thumbnail', fatal=False)
+        title = self._html_search_regex(r'<title>(.+?) - Труба зовёт - Интересный видеохостинг</title>', webpage,
+            'video title')
+        description = self._html_search_meta('description', webpage, 'video description')
+
+        mobj = re.search(r'<div class="col_2">Длительность: <span>(?P<minutes>\d+)м:(?P<seconds>\d+)с</span></div>',
+            webpage)
+        duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None
+
+        view_count = self._html_search_regex(r'<div class="col_2">Просмотров: <span>(\d+)</span></div>', webpage,
+            'view count', fatal=False)
+        view_count = int(view_count) if view_count is not None else None
+
+        comment_count = None
+        comment_str = self._html_search_regex(r'Комментарии: <span>([^<]+)</span>', webpage, 'comment count',
+            fatal=False)
+        if comment_str.startswith('комментариев нет'):
+            comment_count = 0
+        else:
+            mobj = re.search(r'\d+ из (?P<total>\d+) комментариев', comment_str)
+            if mobj:
+                comment_count = int(mobj.group('total'))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'thumbnail': thumbnail,
+            'title': title,
+            'description': description,
+            'duration': duration,
+            'view_count': view_count,
+            'comment_count': comment_count,
+        }
\ No newline at end of file
diff --git a/youtube_dl/extractor/lifenews.py b/youtube_dl/extractor/lifenews.py
index 0512598578847b35611f81b539afeb860cdd0013..7b7185f9adb69f37dee1e4c4b468de8a5a95a556 100644 (file)
@@ -4,19 +4,23 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import unified_strdate
+from ..utils import (
+    int_or_none,
+    unified_strdate
+)
 
 
 class LifeNewsIE(InfoExtractor):
     IE_NAME = 'lifenews'
     IE_DESC = 'LIFE | NEWS'
     _VALID_URL = r'http://lifenews\.ru/(?:mobile/)?news/(?P<id>\d+)'
-    
+
     _TEST = {
         'url': 'http://lifenews.ru/news/126342',
-        'file': '126342.mp4',
         'md5': 'e1b50a5c5fb98a6a544250f2e0db570a',
         'info_dict': {
+            'id': '126342',
+            'ext': 'mp4',
             'title': 'МВД разыскивает мужчин, оставивших в IKEA сумку с автоматом',
             'description': 'Камеры наблюдения гипермаркета зафиксировали троих мужчин, спрятавших оружейный арсенал в камере хранения.',
             'thumbnail': 'http://lifenews.ru/static/posts/2014/1/126342/.video.jpg',
@@ -32,7 +36,7 @@ class LifeNewsIE(InfoExtractor):
 
         video_url = self._html_search_regex(
             r'<video.*?src="([^"]+)".*?></video>', webpage, 'video URL')
-        
+
         thumbnail = self._html_search_regex(
             r'<video.*?poster="([^"]+)".*?"></video>', webpage, 'video thumbnail')
 
@@ -44,12 +48,14 @@ class LifeNewsIE(InfoExtractor):
         description = self._og_search_description(webpage)
 
         view_count = self._html_search_regex(
-            r'<div class=\'views\'>(\d+)</div>', webpage, 'view count')
+            r'<div class=\'views\'>(\d+)</div>', webpage, 'view count', fatal=False)
         comment_count = self._html_search_regex(
-            r'<div class=\'comments\'>(\d+)</div>', webpage, 'comment count')
+            r'<div class=\'comments\'>(\d+)</div>', webpage, 'comment count', fatal=False)
 
         upload_date = self._html_search_regex(
-            r'<time datetime=\'([^\']+)\'>', webpage, 'upload date')
+            r'<time datetime=\'([^\']+)\'>', webpage, 'upload date',fatal=False)
+        if upload_date is not None:
+            upload_date = unified_strdate(upload_date)
 
         return {
             'id': video_id,
@@ -57,7 +63,7 @@ class LifeNewsIE(InfoExtractor):
             'thumbnail': thumbnail,
             'title': title,
             'description': description,
-            'view_count': view_count,
-            'comment_count': comment_count,
-            'upload_date': unified_strdate(upload_date),
+            'view_count': int_or_none(view_count),
+            'comment_count': int_or_none(comment_count),
+            'upload_date': upload_date,
         }
\ No newline at end of file
diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py
index 6f5180892756aec148c0e0b6172e007fe291f623..5447b6c0cab098b895eda0e9f2b3b266fb65a7b0 100644 (file)
@@ -82,12 +82,12 @@ class MTVServicesInfoExtractor(InfoExtractor):
             title_el = find_xpath_attr(
                 itemdoc, './/{http://search.yahoo.com/mrss/}category',
                 'scheme', 'urn:mtvn:video_title')
+        if title_el is None:
+            title_el = itemdoc.find('.//{http://search.yahoo.com/mrss/}title')
         if title_el is None:
             title_el = itemdoc.find('.//title')
             if title_el.text is None:
                 title_el = None
-        if title_el is None:
-            title_el = itemdoc.find('.//{http://search.yahoo.com/mrss/}title')
 
         title = title_el.text
         if title is None:
diff --git a/youtube_dl/extractor/ndr.py b/youtube_dl/extractor/ndr.py
index bf6782d7d83bf5173fc0aa1ce939513a08d853b8..811ef52013ab7c3416da2f647323e4e4992dda97 100644 (file)
@@ -13,22 +13,22 @@ class NDRIE(InfoExtractor):
     _VALID_URL = r'https?://www\.ndr\.de/.+?(?P<id>\d+)\.html'
 
     _TESTS = [
-        # video
         {
-            'url': 'http://www.ndr.de/fernsehen/sendungen/hallo_niedersachsen/media/hallonds19925.html',
-            'md5': '20eba151ff165f386643dad9c1da08f7',
+            'url': 'http://www.ndr.de/fernsehen/sendungen/markt/markt7959.html',
+            'md5': 'e7a6079ca39d3568f4996cb858dd6708',
+            'note': 'Video file',
             'info_dict': {
-                'id': '19925',
+                'id': '7959',
                 'ext': 'mp4',
-                'title': 'Hallo Niedersachsen  ',
-                'description': 'Bei Hallo Niedersachsen um 19:30 Uhr erfahren Sie alles, was am Tag in Niedersachsen los war.',
-                'duration': 1722,
+                'title': 'Markt - die ganze Sendung',
+                'description': 'md5:af9179cf07f67c5c12dc6d9997e05725',
+                'duration': 2655,
             },
         },
-        # audio
         {
             'url': 'http://www.ndr.de/903/audio191719.html',
             'md5': '41ed601768534dd18a9ae34d84798129',
+            'note': 'Audio file',
             'info_dict': {
                 'id': '191719',
                 'ext': 'mp3',
diff --git a/youtube_dl/extractor/slideshare.py b/youtube_dl/extractor/slideshare.py
index afc3001b57f404486e2fa3a9c911bf4eec663b46..9c62825cc7f7cab2a4023a74e36307b48c280bd6 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import json
 
@@ -12,11 +14,12 @@ class SlideshareIE(InfoExtractor):
     _VALID_URL = r'https?://www\.slideshare\.net/[^/]+?/(?P<title>.+?)($|\?)'
 
     _TEST = {
-        u'url': u'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity',
-        u'file': u'25665706.mp4',
-        u'info_dict': {
-            u'title': u'Managing Scale and Complexity',
-            u'description': u'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). This presentation was given by Adrian Cockcroft from Netflix',
+        'url': 'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity',
+        'info_dict': {
+            'id': '25665706',
+            'ext': 'mp4',
+            'title': 'Managing Scale and Complexity',
+            'description': 'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). This presentation was given by Adrian Cockcroft from Netflix.',
         },
     }
 
@@ -26,15 +29,17 @@ class SlideshareIE(InfoExtractor):
         webpage = self._download_webpage(url, page_title)
         slideshare_obj = self._search_regex(
             r'var slideshare_object =  ({.*?}); var user_info =',
-            webpage, u'slideshare object')
+            webpage, 'slideshare object')
         info = json.loads(slideshare_obj)
-        if info['slideshow']['type'] != u'video':
-            raise ExtractorError(u'Webpage type is "%s": only video extraction is supported for Slideshare' % info['slideshow']['type'], expected=True)
+        if info['slideshow']['type'] != 'video':
+            raise ExtractorError('Webpage type is "%s": only video extraction is supported for Slideshare' % info['slideshow']['type'], expected=True)
 
         doc = info['doc']
         bucket = info['jsplayer']['video_bucket']
         ext = info['jsplayer']['video_extension']
         video_url = compat_urlparse.urljoin(bucket, doc + '-SD.' + ext)
+        description = self._html_search_regex(
+            r'<p class="description.*?"[^>]*>(.*?)</p>', webpage, 'description')
 
         return {
             '_type': 'video',
@@ -43,5 +48,5 @@ class SlideshareIE(InfoExtractor):
             'ext': ext,
             'url': video_url,
             'thumbnail': info['slideshow']['pin_image_url'],
-            'description': self._og_search_description(webpage),
+            'description': description,
         }
diff --git a/youtube_dl/extractor/streamcz.py b/youtube_dl/extractor/streamcz.py
new file mode 100644 (file)
index 0000000..7362904
--- /dev/null
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class StreamCZIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?stream\.cz/.+/(?P<videoid>.+)'
+
+    _TEST = {
+        'url': 'http://www.stream.cz/peklonataliri/765767-ecka-pro-deti',
+        'md5': '6d3ca61a8d0633c9c542b92fcb936b0c',
+        'info_dict': {
+            'id': '765767',
+            'ext': 'mp4',
+            'title': 'Peklo na talíři: Éčka pro děti',
+            'description': 'md5:49ace0df986e95e331d0fe239d421519',
+            'thumbnail': 'http://im.stream.cz/episode/52961d7e19d423f8f06f0100',
+            'duration': 256,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('videoid')
+
+        webpage = self._download_webpage(url, video_id)
+
+        data = self._html_search_regex(r'Stream\.Data\.Episode\((.+?)\);', webpage, 'stream data')
+
+        jsonData = json.loads(data)
+
+        formats = []
+        for video in jsonData['instances']:
+            for video_format in video['instances']:
+                format_id = video_format['quality']
+
+                if format_id == '240p':
+                    quality = 0
+                elif format_id == '360p':
+                    quality = 1
+                elif format_id == '480p':
+                    quality = 2
+                elif format_id == '720p':
+                    quality = 3
+
+                formats.append({
+                    'format_id': '%s-%s' % (video_format['type'].split('/')[1], format_id),
+                    'url': video_format['source'],
+                    'quality': quality,
+                })
+
+        self._sort_formats(formats)
+
+        return {
+            'id': str(jsonData['id']),
+            'title': self._og_search_title(webpage),
+            'thumbnail': jsonData['episode_image_original_url'].replace('//', 'http://'),
+            'formats': formats,
+            'description': self._og_search_description(webpage),
+            'duration': int_or_none(jsonData['duration']),
+            'view_count': int_or_none(jsonData['stats_total']),
+        }
diff --git a/youtube_dl/extractor/vesti.py b/youtube_dl/extractor/vesti.py
new file mode 100644 (file)
index 0000000..7773cec
--- /dev/null
@@ -0,0 +1,171 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    int_or_none
+)
+
+
+class VestiIE(InfoExtractor):
+    IE_NAME = 'vesti'
+    IE_DESC = 'Вести.Ru'
+    _VALID_URL = r'http://(?:.+?\.)?vesti\.ru/(?P<id>.+)'
+
+    _TESTS = [
+        {
+            'url': 'http://www.vesti.ru/videos?vid=575582&cid=1',
+            'info_dict': {
+                'id': '765035',
+                'ext': 'mp4',
+                'title': 'Вести.net: биткоины в России не являются законными',
+                'description': 'md5:d4bb3859dc1177b28a94c5014c35a36b',
+                'duration': 302,
+            },
+            'params': {
+                # m3u8 download
+                'skip_download': True,
+            },
+        },
+        {
+            'url': 'http://www.vesti.ru/only_video.html?vid=576180',
+            'info_dict': {
+                'id': '766048',
+                'ext': 'mp4',
+                'title': 'США заморозило, Британию затопило',
+                'description': 'md5:f0ed0695ec05aed27c56a70a58dc4cc1',
+                'duration': 87,
+            },
+            'params': {
+                # m3u8 download
+                'skip_download': True,
+            },
+        },
+        {
+            'url': 'http://sochi2014.vesti.ru/video/index/video_id/766403',
+            'info_dict': {
+                'id': '766403',
+                'ext': 'mp4',
+                'title': 'XXII зимние Олимпийские игры. Российские хоккеисты стартовали на Олимпиаде с победы',
+                'description': 'md5:55805dfd35763a890ff50fa9e35e31b3',
+                'duration': 271,
+            },
+            'params': {
+                # m3u8 download
+                'skip_download': True,
+            },
+            'skip': 'Blocked outside Russia'
+        },
+        {
+            'url': 'http://sochi2014.vesti.ru/live/play/live_id/301',
+            'info_dict': {
+                'id': '51499',
+                'ext': 'flv',
+                'title': 'Сочи-2014. Биатлон. Индивидуальная гонка. Мужчины ',
+                'description': 'md5:9e0ed5c9d2fa1efbfdfed90c9a6d179c',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+            'skip': 'Translation has finished'
+        }
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        page = self._download_webpage(url, video_id, 'Downloading page')
+
+        mobj = re.search(r'<meta property="og:video" content=".+?\.swf\?v?id=(?P<id>\d+).*?" />', page)
+        if mobj:
+            video_type = 'video'
+            video_id = mobj.group('id')
+        else:
+            mobj = re.search(
+                r'<div.+?id="current-video-holder".*?>\s*<iframe src="http://player\.rutv\.ru/iframe/(?P<type>[^/]+)/id/(?P<id>\d+)[^"]*"',
+                page)
+
+            if not mobj:
+                raise ExtractorError('No media found')
+
+            video_type = mobj.group('type')
+            video_id = mobj.group('id')
+
+        json_data = self._download_json(
+            'http://player.rutv.ru/iframe/%splay/id/%s' % ('live-' if video_type == 'live' else '', video_id),
+            video_id, 'Downloading JSON')
+
+        if json_data['errors']:
+            raise ExtractorError('vesti returned error: %s' % json_data['errors'], expected=True)
+
+        playlist = json_data['data']['playlist']
+        medialist = playlist['medialist']
+        media = medialist[0]
+
+        if media['errors']:
+            raise ExtractorError('vesti returned error: %s' % media['errors'], expected=True)
+
+        view_count = playlist.get('count_views')
+        priority_transport = playlist['priority_transport']
+
+        thumbnail = media['picture']
+        width = media['width']
+        height = media['height']
+        description = media['anons']
+        title = media['title']
+        duration = int_or_none(media.get('duration'))
+
+        formats = []
+
+        for transport, links in media['sources'].items():
+            for quality, url in links.items():
+                if transport == 'rtmp':
+                    mobj = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>.+)$', url)
+                    if not mobj:
+                        continue
+                    fmt = {
+                        'url': mobj.group('url'),
+                        'play_path': mobj.group('playpath'),
+                        'app': mobj.group('app'),
+                        'page_url': 'http://player.rutv.ru',
+                        'player_url': 'http://player.rutv.ru/flash2v/osmf.swf?i=22',
+                        'rtmp_live': True,
+                        'ext': 'flv',
+                        'vbr': int(quality),
+                    }
+                elif transport == 'm3u8':
+                    fmt = {
+                        'url': url,
+                        'ext': 'mp4',
+                    }
+                else:
+                    fmt = {
+                        'url': url
+                    }
+                fmt.update({
+                    'width': width,
+                    'height': height,
+                    'format_id': '%s-%s' % (transport, quality),
+                    'preference': -1 if priority_transport == transport else -2,
+                })
+                formats.append(fmt)
+
+        if not formats:
+            raise ExtractorError('No media links available for %s' % video_id)
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'view_count': view_count,
+            'duration': duration,
+            'formats': formats,
+        }
\ No newline at end of file
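
A minimal standalone sketch of the RTMP branch above, showing how each source URL is split into the connect URL, RTMP application and play path handed to the downloader (the sample URL below is made up, not a real rutv.ru endpoint):

import re

# Hypothetical RTMP source URL shaped like the ones the extractor receives
rtmp_url = 'rtmp://video.example.ru/vod/mp4:olympics/hockey_hd'

mobj = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>.+)$', rtmp_url)
print(mobj.group('url'))       # rtmp://video.example.ru/vod/mp4:olympics
print(mobj.group('app'))       # vod/mp4:olympics
print(mobj.group('playpath'))  # hockey_hd
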
index 1a6a7688d435bd275777aeb4ba5425cf56d00267..982619922d8ef5fdd0f260902b0253f5dce024dd 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import os
 import re
 
@@ -10,14 +12,14 @@ from ..utils import (
 class XTubeIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>xtube\.com/watch\.php\?v=(?P<videoid>[^/?&]+))'
     _TEST = {
-        u'url': u'http://www.xtube.com/watch.php?v=kVTUy_G222_',
-        u'file': u'kVTUy_G222_.mp4',
-        u'md5': u'092fbdd3cbe292c920ef6fc6a8a9cdab',
-        u'info_dict': {
-            u"title": u"strange erotica",
-            u"description": u"surreal gay themed erotica...almost an ET kind of thing",
-            u"uploader": u"greenshowers",
-            u"age_limit": 18,
+        'url': 'http://www.xtube.com/watch.php?v=kVTUy_G222_',
+        'file': 'kVTUy_G222_.mp4',
+        'md5': '092fbdd3cbe292c920ef6fc6a8a9cdab',
+        'info_dict': {
+            "title": "strange erotica",
+            "description": "surreal gay themed erotica...almost an ET kind of thing",
+            "uploader": "greenshowers",
+            "age_limit": 18,
         }
     }
 
@@ -30,10 +32,10 @@ class XTubeIE(InfoExtractor):
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)
 
-        video_title = self._html_search_regex(r'<div class="p_5px[^>]*>([^<]+)', webpage, u'title')
-        video_uploader = self._html_search_regex(r'so_s\.addVariable\("owner_u", "([^"]+)', webpage, u'uploader', fatal=False)
-        video_description = self._html_search_regex(r'<p class="video_description">([^<]+)', webpage, u'description', fatal=False)
-        video_url= self._html_search_regex(r'var videoMp4 = "([^"]+)', webpage, u'video_url').replace('\\/', '/')
+        video_title = self._html_search_regex(r'<div class="p_5px[^>]*>([^<]+)', webpage, 'title')
+        video_uploader = self._html_search_regex(r'so_s\.addVariable\("owner_u", "([^"]+)', webpage, 'uploader', fatal=False)
+        video_description = self._html_search_regex(r'<p class="video_description">([^<]+)', webpage, 'description', fatal=False)
+        video_url = self._html_search_regex(r'var videoMp4 = "([^"]+)', webpage, 'video_url').replace('\\/', '/')
         path = compat_urllib_parse_urlparse(video_url).path
         extension = os.path.splitext(path)[1][1:]
         format = path.split('/')[5].split('_')[:2]
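
For context, the format information in the hunk above is derived from the path of the MP4 URL; a hedged sketch with a made-up URL (real xtube CDN paths may differ):

import os
from urllib.parse import urlparse  # stands in for compat_urllib_parse_urlparse on Python 3

video_url = 'http://cdn.example.com/videos/a1/b2/c3/480p_600k_abc123.mp4'  # hypothetical
path = urlparse(video_url).path              # /videos/a1/b2/c3/480p_600k_abc123.mp4
extension = os.path.splitext(path)[1][1:]    # 'mp4'
fmt = path.split('/')[5].split('_')[:2]      # ['480p', '600k'] from the sixth path segment
print(extension, fmt)
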
index e17a39782bd2e674855dff8a5ec3112bd40158c6..d92d14f718158f285b2696944afb155fdd664538 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import itertools
 import json
 import re
@@ -12,25 +14,25 @@ from ..utils import (
 
 
 class YahooIE(InfoExtractor):
-    IE_DESC = u'Yahoo screen'
+    IE_DESC = 'Yahoo screen'
     _VALID_URL = r'http://screen\.yahoo\.com/.*?-(?P<id>\d*?)\.html'
     _TESTS = [
         {
-            u'url': u'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
-            u'file': u'214727115.mp4',
-            u'md5': u'4962b075c08be8690a922ee026d05e69',
-            u'info_dict': {
-                u'title': u'Julian Smith & Travis Legg Watch Julian Smith',
-                u'description': u'Julian and Travis watch Julian Smith',
+            'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
+            'file': '214727115.mp4',
+            'md5': '4962b075c08be8690a922ee026d05e69',
+            'info_dict': {
+                'title': 'Julian Smith & Travis Legg Watch Julian Smith',
+                'description': 'Julian and Travis watch Julian Smith',
             },
         },
         {
-            u'url': u'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html',
-            u'file': u'103000935.mp4',
-            u'md5': u'd6e6fc6e1313c608f316ddad7b82b306',
-            u'info_dict': {
-                u'title': u'Codefellas - The Cougar Lies with Spanish Moss',
-                u'description': u'Agent Topple\'s mustache does its dirty work, and Nicole brokers a deal for peace. But why is the NSA collecting millions of Instagram brunch photos? And if your waffles have nothing to hide, what are they so worried about?',
+            'url': 'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html',
+            'file': '103000935.mp4',
+            'md5': 'd6e6fc6e1313c608f316ddad7b82b306',
+            'info_dict': {
+                'title': 'Codefellas - The Cougar Lies with Spanish Moss',
+                'description': 'Agent Topple\'s mustache does its dirty work, and Nicole brokers a deal for peace. But why is the NSA collecting millions of Instagram brunch photos? And if your waffles have nothing to hide, what are they so worried about?',
             },
         },
     ]
@@ -41,7 +43,7 @@ class YahooIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         items_json = self._search_regex(r'mediaItems: ({.*?})$',
-            webpage, u'items', flags=re.MULTILINE)
+            webpage, 'items', flags=re.MULTILINE)
         items = json.loads(items_json)
         info = items['mediaItems']['query']['results']['mediaObj'][0]
         # The 'meta' field is not always in the video webpage, we request it
@@ -60,7 +62,7 @@ class YahooIE(InfoExtractor):
         })
         query_result_json = self._download_webpage(
             'http://video.query.yahoo.com/v1/public/yql?' + data,
-            video_id, u'Downloading video info')
+            video_id, 'Downloading video info')
         query_result = json.loads(query_result_json)
         info = query_result['query']['results']['mediaObj'][0]
         meta = info['meta']
@@ -103,13 +105,13 @@ class YahooNewsIE(YahooIE):
     _VALID_URL = r'http://news\.yahoo\.com/video/.*?-(?P<id>\d*?)\.html'
 
     _TEST = {
-        u'url': u'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
-        u'md5': u'67010fdf3a08d290e060a4dd96baa07b',
-        u'info_dict': {
-            u'id': u'104538833',
-            u'ext': u'mp4',
-            u'title': u'China Moses Is Crazy About the Blues',
-            u'description': u'md5:9900ab8cd5808175c7b3fe55b979bed0',
+        'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
+        'md5': '67010fdf3a08d290e060a4dd96baa07b',
+        'info_dict': {
+            'id': '104538833',
+            'ext': 'mp4',
+            'title': 'China Moses Is Crazy About the Blues',
+            'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
         },
     }
 
@@ -120,14 +122,14 @@ class YahooNewsIE(YahooIE):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
-        long_id = self._search_regex(r'contentId: \'(.+?)\',', webpage, u'long id')
+        long_id = self._search_regex(r'contentId: \'(.+?)\',', webpage, 'long id')
         return self._get_info(long_id, video_id)
 
 
 class YahooSearchIE(SearchInfoExtractor):
-    IE_DESC = u'Yahoo screen search'
+    IE_DESC = 'Yahoo screen search'
     _MAX_RESULTS = 1000
-    IE_NAME = u'screen.yahoo:search'
+    IE_NAME = 'screen.yahoo:search'
     _SEARCH_KEY = 'yvsearch'
 
     def _get_n_results(self, query, n):
@@ -139,12 +141,12 @@ class YahooSearchIE(SearchInfoExtractor):
             'entries': []
         }
         for pagenum in itertools.count(0): 
-            result_url = u'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
+            result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
             webpage = self._download_webpage(result_url, query,
                                              note='Downloading results page '+str(pagenum+1))
             info = json.loads(webpage)
-            m = info[u'm']
-            results = info[u'results']
+            m = info['m']
+            results = info['results']
 
             for (i, r) in enumerate(results):
                 if (pagenum * 30) +i >= n:
@@ -152,7 +154,7 @@ class YahooSearchIE(SearchInfoExtractor):
                 mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
                 e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
                 res['entries'].append(e)
-            if (pagenum * 30 +i >= n) or (m[u'last'] >= (m[u'total'] -1)):
+            if (pagenum * 30 +i >= n) or (m['last'] >= (m['total'] -1)):
                 break
 
         return res
index e038c7752219e4dc9ee2fed84269595188b1b361..8c2c4dfa20b65146545f59671663e143b9f398f4 100644 (file)
@@ -34,6 +34,7 @@ from ..utils import (
     unified_strdate,
     orderedSet,
     write_json_file,
+    uppercase_escape,
 )
 
 class YoutubeBaseInfoExtractor(InfoExtractor):
@@ -136,7 +137,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                          (?:https?://|//)?                                    # http(s):// or protocol-independent URL (optional)
                          (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
                             (?:www\.)?deturl\.com/www\.youtube\.com/|
-                            (?:www\.)?pwnyoutube\.com|
+                            (?:www\.)?pwnyoutube\.com/|
                             tube\.majestyc\.net/|
                             youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
                          (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
@@ -1590,11 +1591,10 @@ class YoutubeChannelIE(InfoExtractor):
             # Download all channel pages using the json-based channel_ajax query
             for pagenum in itertools.count(1):
                 url = self._MORE_PAGES_URL % (pagenum, channel_id)
-                page = self._download_webpage(url, channel_id,
-                                              u'Downloading page #%s' % pagenum)
-    
-                page = json.loads(page)
-    
+                page = self._download_json(
+                    url, channel_id, note=u'Downloading page #%s' % pagenum,
+                    transform_source=uppercase_escape)
+
                 ids_in_page = self.extract_videos_from_page(page['content_html'])
                 video_ids.extend(ids_in_page)
     
index 01c8c017d53a90477871f0ad4b6ef15250cc5e5b..67c6af5070bbe214221b288649049913bfccadf5 100644 (file)
@@ -756,9 +756,9 @@ def unified_strdate(date_str):
     """Return a string with the date in the format YYYYMMDD"""
     upload_date = None
     #Replace commas
-    date_str = date_str.replace(',',' ')
+    date_str = date_str.replace(',', ' ')
     # %z (UTC offset) is only supported in python>=3.2
-    date_str = re.sub(r' ?(\+|-)[0-9:]*$', '', date_str)
+    date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
     format_expressions = [
         '%d %B %Y',
         '%B %d %Y',
@@ -1214,3 +1214,9 @@ class PagedList(object):
             if end == nextfirstid:
                 break
         return res
+
+
+def uppercase_escape(s):
+    return re.sub(
+        r'\\U([0-9a-fA-F]{8})',
+        lambda m: compat_chr(int(m.group(1), base=16)), s)
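
Two quick standalone checks of the utils.py changes above (the date strings and the JSON body are made-up samples, and plain chr() stands in for youtube-dl's compat_chr, which is equivalent on Python 3); the second one mirrors why the YouTube channel AJAX pages above are now downloaded with transform_source=uppercase_escape:

import json
import re

# Old vs. new offset-stripping pattern from unified_strdate(): the old one
# also swallowed the day component of a bare ISO date.
OLD = r' ?(\+|-)[0-9:]*$'
NEW = r' ?(\+|-)[0-9]{2}:?[0-9]{2}$'
print(re.sub(OLD, '', '1968-12-10'))                 # 1968-12  (day lost)
print(re.sub(NEW, '', '1968-12-10'))                 # 1968-12-10
print(re.sub(NEW, '', '2012/10/11 01:56:38 +0000'))  # 2012/10/11 01:56:38

# uppercase_escape() turns literal \UXXXXXXXX escapes, which json.loads
# rejects, into real characters so the body parses.
def uppercase_escape(s):
    return re.sub(
        r'\\U([0-9a-fA-F]{8})',
        lambda m: chr(int(m.group(1), base=16)), s)

raw = '{"content_html": "new upload \\U0001f3a5"}'   # hypothetical AJAX body
print(json.loads(uppercase_escape(raw))['content_html'])
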
index 6ece5d4b675a5bcd35e33b6a74e4ecb17596d465..cfcadd3d1a7cd104b6923a10ebab4b4fe5be3e6b 100644 (file)
@@ -1,2 +1,2 @@
 
-__version__ = '2014.02.08.2'
+__version__ = '2014.02.13'