from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    merge_dicts,
    str_to_int,
    unified_strdate,
    url_or_none,
)


class RedTubeIE(InfoExtractor):
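    # Handles www.redtube.com/<id> watch pages as well as
    # embed.redtube.com/?...&id=<id> embed URLs (see _VALID_URL).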
    _VALID_URL = r'https?://(?:(?:www\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.redtube.com/66418',
        'md5': 'fc08071233725f26b8f014dba9590005',
        'info_dict': {
            'id': '66418',
            'ext': 'mp4',
            'title': 'Sucked on a toilet',
            'upload_date': '20110811',
            'duration': 596,
            'view_count': int,
            'age_limit': 18,
        }
    }, {
        'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',
        'only_matching': True,
    }]

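    # Used by the generic extractor to find RedTube <iframe> embeds
    # (//embed.redtube.com/?...id=NNN) in third-party pages.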
    @staticmethod
    def _extract_urls(webpage):
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
            webpage)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'http://www.redtube.com/%s' % video_id, video_id)

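        # Removed and private videos are flagged by marker strings in the
        # page; fail early with a clear, expected error.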
        ERRORS = (
            (('video-deleted-info', '>This video has been removed'), 'has been removed'),
            (('private_video_text', '>This video is private', '>Send a friend request to its owner to be able to view it'), 'is private'),
        )

        for patterns, message in ERRORS:
            if any(p in webpage for p in patterns):
                raise ExtractorError(
                    'Video %s %s' % (video_id, message), expected=True)

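        # Prefer structured JSON-LD metadata when the page provides it; fall
        # back to scraping the title from the markup below.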
        info = self._search_json_ld(webpage, video_id, default={})

        if not info.get('title'):
            info['title'] = self._html_search_regex(
                (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle|video_title)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
                 r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
                webpage, 'title', group='title',
                default=None) or self._og_search_title(webpage)

        formats = []
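        # The inline 'sources' object, when present, maps quality labels
        # (usually the height in pixels) to direct video URLs.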
        sources = self._parse_json(
            self._search_regex(
                r'sources\s*:\s*({.+?})', webpage, 'source', default='{}'),
            video_id, fatal=False)
        if sources and isinstance(sources, dict):
            for format_id, format_url in sources.items():
                if format_url:
                    formats.append({
                        'url': format_url,
                        'format_id': format_id,
                        'height': int_or_none(format_id),
                    })
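        # 'mediaDefinition' entries carry both HLS manifests and progressive
        # downloads; HLS manifests are expanded into individual formats.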
        medias = self._parse_json(
            self._search_regex(
                r'mediaDefinition["\']?\s*:\s*(\[.+?}\s*\])', webpage,
                'media definitions', default='{}'),
            video_id, fatal=False)
        if medias and isinstance(medias, list):
            for media in medias:
                format_url = url_or_none(media.get('videoUrl'))
                if not format_url:
                    continue
                if media.get('format') == 'hls' or determine_ext(format_url) == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        format_url, video_id, 'mp4',
                        entry_protocol='m3u8_native', m3u8_id='hls',
                        fatal=False))
                    continue
                format_id = media.get('quality')
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                    'height': int_or_none(format_id),
                })
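        # Last resort: a bare <source> tag in the page markup.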
        if not formats:
            video_url = self._html_search_regex(
                r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
            formats.append({'url': video_url})
        self._sort_formats(formats)

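        # Best-effort metadata scraping; each field falls back to None when
        # its markup is not found.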
        thumbnail = self._og_search_thumbnail(webpage)
        upload_date = unified_strdate(self._search_regex(
            r'<span[^>]+>(?:ADDED|Published on) ([^<]+)<',
            webpage, 'upload date', default=None))
        duration = int_or_none(self._og_search_property(
            'video:duration', webpage, default=None) or self._search_regex(
                r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None))
        view_count = str_to_int(self._search_regex(
            (r'<div[^>]*>Views</div>\s*<div[^>]*>\s*([\d,.]+)',
             r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)',
             r'<span[^>]+\bclass=["\']video_view_count[^>]*>\s*([\d,.]+)'),
            webpage, 'view count', default=None))

        # No self-labeling, but they describe themselves as
        # "Home of Videos Porno"
        age_limit = 18

        return merge_dicts(info, {
            'id': video_id,
            'ext': 'mp4',
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'age_limit': age_limit,
            'formats': formats,
        })
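

# A minimal usage sketch (not part of the upstream extractor): youtube-dl
# drives this class through YoutubeDL, roughly as below. Because of the
# relative imports above, run it as part of the package, e.g.
# `python -m youtube_dl.extractor.redtube`, rather than as a standalone file.
if __name__ == '__main__':
    from youtube_dl import YoutubeDL

    with YoutubeDL({'skip_download': True}) as ydl:
        # Fetch metadata only for the first test URL from _TESTS above.
        info = ydl.extract_info('http://www.redtube.com/66418', download=False)
        print(info.get('title'), info.get('duration'))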