gitweb @ CieloNegro.org - youtube-dl.git/commitdiff
Merge remote-tracking branch 'aft90/merge-output-format'
author    Philipp Hagemeister <phihag@phihag.de>
Sat, 10 Jan 2015 00:59:14 +0000 (01:59 +0100)
committer Philipp Hagemeister <phihag@phihag.de>
Sat, 10 Jan 2015 00:59:14 +0000 (01:59 +0100)
Conflicts:
youtube_dl/YoutubeDL.py

test/helper.py
youtube_dl/YoutubeDL.py
youtube_dl/extractor/__init__.py
youtube_dl/extractor/common.py
youtube_dl/extractor/elpais.py
youtube_dl/extractor/netzkino.py [new file with mode: 0644]
youtube_dl/extractor/wdr.py
youtube_dl/extractor/youtube.py
youtube_dl/utils.py
youtube_dl/version.py

diff --git a/test/helper.py b/test/helper.py
index 77225e4f799755e2927a3105fa1974f08779fc8d..c416f388cbfe335678269b0226cc10708c49a850 100644 (file)
@@ -110,6 +110,20 @@ def expect_info_dict(self, got_dict, expected_dict):
         else:
             if isinstance(expected, compat_str) and expected.startswith('md5:'):
                 got = 'md5:' + md5(got_dict.get(info_field))
+            elif isinstance(expected, compat_str) and expected.startswith('mincount:'):
+                got = got_dict.get(info_field)
+                self.assertTrue(
+                    isinstance(got, list),
+                    'Expected field %s to be a list, but it is of type %s' % (
+                        info_field, type(got).__name__))
+                expected_num = int(expected.partition(':')[2])
+                assertGreaterEqual(
+                    self, len(got), expected_num,
+                    'Expected %d items in field %s, but only got %d' % (
+                        expected_num, info_field, len(got)
+                    )
+                )
+                continue
             else:
                 got = got_dict.get(info_field)
             self.assertEqual(expected, got,
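Note: the new 'mincount:' prefix mirrors the existing 'md5:' convention: instead of comparing against an exact value, a test can assert that a list field contains at least N entries. The Netzkino test added below uses it for the comments field; a minimal sketch of the idea (URL and field values are illustrative only, not from this commit):

    _TEST = {
        'url': 'http://www.example.com/some-video',   # placeholder URL
        'info_dict': {
            'id': 'some-video',
            'ext': 'mp4',
            'title': 'Some video',
            # expect_info_dict() now checks that len(comments) >= 3
            'comments': 'mincount:3',
        },
    }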
diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index 6d5c401dfa3418350d03105e3efe537cecc7d911..61675d8ec8d7e6c2b7a3bb143dfcb3355bd1fe0a 100755 (executable)
@@ -203,6 +203,7 @@ class YoutubeDL(object):
 
                        Progress hooks are guaranteed to be called at least once
                        (with status "finished") if the download is successful.
+    merge_output_format: Extension to use when merging formats.
 
 
     The following parameters are not used by YoutubeDL itself, they are used by
@@ -909,10 +910,23 @@ class YoutubeDL(object):
                                                   'contain the video, try using '
                                                   '"-f %s+%s"' % (format_2, format_1))
                                 return
+                            output_ext = (
+                                formats_info[0]['ext']
+                                if self.params.get('merge_output_format') is None
+                                else self.params['merge_output_format'])
                             selected_format = {
                                 'requested_formats': formats_info,
                                 'format': rf,
-                                'ext': self.params['merge_output_format'] if self.params['merge_output_format'] is not None else formats_info[0]['ext'],
+                                'ext': formats_info[0]['ext'],
+                                'width': formats_info[0].get('width'),
+                                'height': formats_info[0].get('height'),
+                                'resolution': formats_info[0].get('resolution'),
+                                'fps': formats_info[0].get('fps'),
+                                'vcodec': formats_info[0].get('vcodec'),
+                                'vbr': formats_info[0].get('vbr'),
+                                'acodec': formats_info[1].get('acodec'),
+                                'abr': formats_info[1].get('abr'),
+                                'ext': output_ext,
                             }
                         else:
                             selected_format = None
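Note: for callers embedding youtube-dl, the new merge_output_format parameter controls the container used when two formats are merged (the merge itself still requires ffmpeg or avconv). A minimal sketch of the Python API usage, following the README pattern and its standard test video URL:

    import youtube_dl

    ydl_opts = {
        'format': 'bestvideo+bestaudio',  # selecting two formats forces a merge
        'merge_output_format': 'mkv',     # write the merged result as Matroska
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])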
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index 8dacc2c54a24f39f67678a22be9511363e92b095..5da7568ca2767bbbcc7887895fb4f3096fd96999 100644 (file)
@@ -274,6 +274,7 @@ from .nbc import (
 )
 from .ndr import NDRIE
 from .ndtv import NDTVIE
+from .netzkino import NetzkinoIE
 from .nerdcubed import NerdCubedFeedIE
 from .newgrounds import NewgroundsIE
 from .newstube import NewstubeIE
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index d703893dcfef1e772f1e294b0c08430ee6c15db3..b4cd59e4318a52019e060250499b1d50d1e01a8b 100644 (file)
@@ -147,6 +147,17 @@ class InfoExtractor(object):
     like_count:     Number of positive ratings of the video
     dislike_count:  Number of negative ratings of the video
     comment_count:  Number of comments on the video
+    comments:       A list of comments, each with one or more of the following
+                    properties (all but one of text or html optional):
+                        * "author" - human-readable name of the comment author
+                        * "author_id" - user ID of the comment author
+                        * "id" - Comment ID
+                        * "html" - Comment as HTML
+                        * "text" - Plain text of the comment
+                        * "timestamp" - UNIX timestamp of comment
+                        * "parent" - ID of the comment this one is replying to.
+                                     Set to "root" to indicate that this is a
+                                     comment to the original video.
     age_limit:      Age restriction for the video, as an integer (years)
     webpage_url:    The url to the video webpage, if given to youtube-dl it
                     should allow to get the same result again. (It will be set
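Note: to make the new field concrete, here is a hedged sketch of a single entry in the comments list as an extractor could return it (all values invented for illustration; only one of "text" or "html" is strictly required):

    {
        'author': 'Some Viewer',      # human-readable name of the author
        'author_id': 'viewer42',      # site-specific user ID
        'id': '1001',                 # comment ID
        'text': 'Great film!',        # plain-text body
        'timestamp': 1344858571,      # UNIX timestamp of the comment
        'parent': 'root',             # a top-level comment on the video itself
    }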
diff --git a/youtube_dl/extractor/elpais.py b/youtube_dl/extractor/elpais.py
index 4277202a2eea45afdcd750e3e22e651d5ac9342c..00a69e6312aede6069e062c6abff29137939daa9 100644 (file)
@@ -1,8 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 from ..utils import unified_strdate
 
@@ -24,9 +22,7 @@ class ElPaisIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         prefix = self._html_search_regex(
diff --git a/youtube_dl/extractor/netzkino.py b/youtube_dl/extractor/netzkino.py
new file mode 100644 (file)
index 0000000..93567d1
--- /dev/null
@@ -0,0 +1,86 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    clean_html,
+    int_or_none,
+    js_to_json,
+    parse_iso8601,
+)
+
+
+class NetzkinoIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?netzkino\.de/\#!/(?P<category>[^/]+)/(?P<id>[^/]+)'
+
+    _TEST = {
+        'url': 'http://www.netzkino.de/#!/scifikino/rakete-zum-mond',
+        'md5': '92a3f8b76f8d7220acce5377ea5d4873',
+        'info_dict': {
+            'id': 'rakete-zum-mond',
+            'ext': 'mp4',
+            'title': 'Rakete zum Mond (Endstation Mond, Destination Moon)',
+            'comments': 'mincount:3',
+            'description': 'md5:1eddeacc7e62d5a25a2d1a7290c64a28',
+            'upload_date': '20120813',
+            'thumbnail': 're:https?://.*\.jpg$',
+            'timestamp': 1344858571,
+            'age_limit': 12,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        category_id = mobj.group('category')
+        video_id = mobj.group('id')
+
+        api_url = 'http://api.netzkino.de.simplecache.net/capi-2.0a/categories/%s.json?d=www' % category_id
+        api_info = self._download_json(api_url, video_id)
+        info = next(
+            p for p in api_info['posts'] if p['slug'] == video_id)
+        custom_fields = info['custom_fields']
+
+        production_js = self._download_webpage(
+            'http://www.netzkino.de/beta/dist/production.min.js', video_id,
+            note='Downloading player code')
+        avo_js = self._search_regex(
+            r'window\.avoCore\s*=.*?urlTemplate:\s*(\{.*?"\})',
+            production_js, 'URL templates')
+        templates = self._parse_json(
+            avo_js, video_id, transform_source=js_to_json)
+
+        suffix = {
+            'hds': '.mp4/manifest.f4m',
+            'hls': '.mp4/master.m3u8',
+            'pmd': '.mp4',
+        }
+        film_fn = custom_fields['Streaming'][0]
+        formats = [{
+            'format_id': key,
+            'ext': 'mp4',
+            'url': tpl.replace('{}', film_fn) + suffix[key],
+        } for key, tpl in templates.items()]
+        self._sort_formats(formats)
+
+        comments = [{
+            'timestamp': parse_iso8601(c.get('date'), delimiter=' '),
+            'id': c['id'],
+            'author': c['name'],
+            'html': c['content'],
+            'parent': 'root' if c.get('parent', 0) == 0 else c['parent'],
+        } for c in info.get('comments', [])]
+
+        return {
+            'id': video_id,
+            'formats': formats,
+            'comments': comments,
+            'title': info['title'],
+            'age_limit': int_or_none(custom_fields.get('FSK')[0]),
+            'timestamp': parse_iso8601(info.get('date'), delimiter=' '),
+            'description': clean_html(info.get('content')),
+            'thumbnail': info.get('thumbnail'),
+            'playlist_title': api_info.get('title'),
+            'playlist_id': category_id,
+        }
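Note: a quick way to exercise the new extractor is the project's per-extractor download test, whose method name is derived from the IE name; assuming the usual development setup and a network connection, running python test/test_download.py TestDownload.test_Netzkino from the repository root should fetch the test URL above and validate the info_dict, including the 'comments': 'mincount:3' expectation handled by the test/helper.py change.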
diff --git a/youtube_dl/extractor/wdr.py b/youtube_dl/extractor/wdr.py
index 8e25ecf280769166a49d18cfd2508bd6d90caa74..45466e31b7445f8dd8da742308dcc69f2ff1152f 100644 (file)
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
+import itertools
 import re
 
 from .common import InfoExtractor
@@ -67,6 +68,10 @@ class WDRIE(InfoExtractor):
                 'upload_date': '20140717',
             },
         },
+        {
+            'url': 'http://www1.wdr.de/mediathek/video/sendungen/quarks_und_co/filterseite-quarks-und-co100.html',
+            'playlist_mincount': 146,
+        }
     ]
 
     def _real_extract(self, url):
@@ -81,6 +86,27 @@ class WDRIE(InfoExtractor):
                 self.url_result(page_url + href, 'WDR')
                 for href in re.findall(r'<a href="/?(.+?%s\.html)" rel="nofollow"' % self._PLAYER_REGEX, webpage)
             ]
+
+            if entries:  # Playlist page
+                return self.playlist_result(entries, page_id)
+
+            # Overview page
+            entries = []
+            for page_num in itertools.count(2):
+                hrefs = re.findall(
+                    r'<li class="mediathekvideo"\s*>\s*<img[^>]*>\s*<a href="(/mediathek/video/[^"]+)"',
+                    webpage)
+                entries.extend(
+                    self.url_result(page_url + href, 'WDR')
+                    for href in hrefs)
+                next_url_m = re.search(
+                    r'<li class="nextToLast">\s*<a href="([^"]+)"', webpage)
+                if not next_url_m:
+                    break
+                next_url = page_url + next_url_m.group(1)
+                webpage = self._download_webpage(
+                    next_url, page_id,
+                    note='Downloading playlist page %d' % page_num)
             return self.playlist_result(entries, page_id)
 
         flashvars = compat_parse_qs(
@@ -172,8 +198,7 @@ class WDRMausIE(InfoExtractor):
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
         param_code = self._html_search_regex(
@@ -224,5 +249,3 @@ class WDRMausIE(InfoExtractor):
             'thumbnail': thumbnail,
             'upload_date': upload_date,
         }
-
-# TODO test _1
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index e719560711dbdd9d3f31ccc7432ee705132a91d4..bc18276d6c7754a812b04c4ae42bc6c021d22627 100644 (file)
@@ -264,9 +264,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},
 
         # Dash mp4 audio
-        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 48, 'preference': -50},
-        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 128, 'preference': -50},
-        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 256, 'preference': -50},
+        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 48, 'preference': -50},
+        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 128, 'preference': -50},
+        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50},
 
         # Dash webm
         '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 29739a4833de0a782b359d958d331316cbaf1c24..079e8d2c3f5168b3f0af233f3a5377f15e5a46f3 100644 (file)
@@ -205,6 +205,10 @@ def get_element_by_attribute(attribute, value, html):
 
 def clean_html(html):
     """Clean an HTML snippet into a readable string"""
+
+    if html is None:  # Convenience for sanitizing descriptions etc.
+        return html
+
     # Newline vs <br />
     html = html.replace('\n', ' ')
     html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
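Note: the added None guard lets callers pass a possibly-missing description straight to clean_html() without checking first. A small sketch of the resulting behaviour (assuming the function is imported from youtube_dl.utils):

    from youtube_dl.utils import clean_html

    clean_html(None)             # now returns None instead of raising AttributeError
    clean_html('One<br />Two')   # -> 'One\nTwo'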
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 2d942852f85b8b922dde587baede8024c1e551a1..8c57c7413b4491e3ae6497e2e7bbcb871f556bab 100644 (file)
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2015.01.09'
+__version__ = '2015.01.09.2'