]> gitweb @ CieloNegro.org - youtube-dl.git/blob - youtube_dl/extractor/revision3.py
[revision3] Add new extractor(closes #6388)
[youtube-dl.git] / youtube_dl / extractor / revision3.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..compat import compat_str
8 from ..utils import (
9     int_or_none,
10     parse_iso8601,
11     unescapeHTML,
12 )
13
14
class Revision3IE(InfoExtractor):
    """Extractor for revision3.com, testtube.com and animalist.com.

    A URL may point at a single episode or at a show page; the page-data
    API's ``data.type`` field distinguishes the two. Episodes yield a
    single video info dict; shows yield a playlist of url_results, one
    per episode, paging through the API until all episodes are collected.
    """
    _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:revision3|testtube|animalist)\.com)/(?P<id>[^/]+(?:/[^/?#]+)?)'
    _TESTS = [{
        'url': 'http://www.revision3.com/technobuffalo/5-google-predictions-for-2016',
        'md5': 'd94a72d85d0a829766de4deb8daaf7df',
        'info_dict': {
            'id': '73034',
            'ext': 'webm',
            'title': '5 Google Predictions for 2016',
            'description': 'Google had a great 2015, but it\'s already time to look ahead. Here are our five predictions for 2016.',
            'upload_date': '20151228',
            'timestamp': 1451325600,
            'duration': 187,
        }
    }, {
        # Show page: extracted as a playlist of its episodes.
        'url': 'http://testtube.com/brainstuff',
        'info_dict': {
            'id': '251',
            'title': 'BrainStuff',
            'description': 'Whether the topic is popcorn or particle physics, you can count on the HowStuffWorks team to explore-and explain-the everyday science in the world around us on BrainStuff.',
        },
        'playlist_mincount': 93,
    }]
    # Page-data endpoint, filled as % (domain, display_id, domain).
    _PAGE_DATA_TEMPLATE = 'http://www.%s/apiProxy/ddn/%s?domain=%s'
    _API_KEY = 'ba9c741bce1b9d8e3defcc22193f3651b8867e62'

    def _real_extract(self, url):
        domain, display_id = re.match(self._VALID_URL, url).groups()
        page_info = self._download_json(
            self._PAGE_DATA_TEMPLATE % (domain, display_id, domain), display_id)

        if page_info['data']['type'] == 'episode':
            return self._extract_episode(page_info['data'])
        return self._extract_show(url, domain, display_id, page_info)

    def _extract_episode(self, episode_data):
        """Extract a single video from an episode page-data payload."""
        video_id = compat_str(episode_data['video']['data']['id'])
        video_data = self._download_json(
            'http://revision3.com/api/getPlaylist.json?api_key=%s&codecs=h264,vp8,theora&video_id=%s' % (self._API_KEY, video_id),
            video_id)['items'][0]

        # media is keyed by type (e.g. container), each value keyed by
        # quality; the special 'hls' quality carries an m3u8 manifest.
        formats = []
        for media_type, media in video_data['media'].items():
            for quality_id, quality in media.items():
                if quality_id == 'hls':
                    formats.extend(self._extract_m3u8_formats(
                        quality['url'], video_id, 'mp4',
                        'm3u8_native', m3u8_id='hls', fatal=False))
                else:
                    formats.append({
                        'url': quality['url'],
                        'format_id': '%s-%s' % (media_type, quality_id),
                        'tbr': int_or_none(quality.get('bitrate')),
                    })
        self._sort_formats(formats)

        thumbnails = [{
            'url': image_url,
            'id': image_id,
        } for image_id, image_url in video_data.get('images', {}).items()]

        return {
            'id': video_id,
            'title': unescapeHTML(video_data['title']),
            'description': unescapeHTML(video_data.get('summary')),
            # publishTime uses a space between date and time instead of 'T'.
            'timestamp': parse_iso8601(episode_data.get('publishTime'), ' '),
            'author': episode_data.get('author'),
            'duration': int_or_none(video_data.get('duration')),
            'thumbnails': thumbnails,
            'formats': formats,
        }

    def _extract_show(self, url, domain, display_id, page_info):
        """Build a playlist of every episode of a show, paging the API."""
        show_data = page_info['show']['data']
        episodes_data = page_info['episodes']['data']
        num_episodes = page_info['meta']['totalEpisodes']
        processed_episodes = 0
        entries = []
        page_num = 1
        while True:
            entries.extend(self.url_result(
                url + '/%s' % episode['slug']) for episode in episodes_data)
            processed_episodes += len(episodes_data)
            # '>=' (not '==') and the empty-page check below both guard
            # against an infinite loop should the API's totalEpisodes
            # disagree with the number of episodes actually returned.
            if processed_episodes >= num_episodes:
                break
            page_num += 1
            episodes_data = self._download_json(self._PAGE_DATA_TEMPLATE % (
                domain, display_id + '/' + compat_str(page_num), domain),
                display_id)['episodes']['data']
            if not episodes_data:
                break

        return self.playlist_result(
            entries, compat_str(show_data['id']),
            show_data.get('name'), show_data.get('summary'))