]> gitweb @ CieloNegro.org - youtube-dl.git/blob - youtube_dl/extractor/facebook.py
5e43f235965cb7b702987276d63f2ae43a75c501
[youtube-dl.git] / youtube_dl / extractor / facebook.py
1 from __future__ import unicode_literals
2
3 import json
4 import re
5 import socket
6
7 from .common import InfoExtractor
8 from ..compat import (
9     compat_http_client,
10     compat_urllib_error,
11     compat_urllib_parse_unquote,
12 )
13 from ..utils import (
14     error_to_compat_str,
15     ExtractorError,
16     limit_length,
17     sanitized_Request,
18     urlencode_postdata,
19     get_element_by_id,
20     clean_html,
21 )
22
23
class FacebookIE(InfoExtractor):
    """Extractor for videos hosted on facebook.com.

    Handles both the classic ``video.php?v=<id>`` / ``photo.php`` style
    URLs and the newer ``/<page>/videos/<id>/`` permalinks.  If login
    credentials are available (via --username/--password or .netrc under
    the ``facebook`` machine) a login is attempted first so that videos
    visible only to the account become extractable.
    """
    _VALID_URL = r'''(?x)
        https?://(?:\w+\.)?facebook\.com/
        (?:[^#]*?\#!/)?
        (?:
            (?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
            (?:v|video_id)=|
            [^/]+/videos/(?:[^/]+/)?
        )
        (?P<id>[0-9]+)
        (?:.*)'''
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = 'facebook'
    _TESTS = [{
        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
        'md5': '6a40d33c0eccbb1af76cf0485a052659',
        'info_dict': {
            'id': '637842556329505',
            'ext': 'mp4',
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
            'uploader': 'Tennis on Facebook',
        }
    }, {
        'note': 'Video without discernible title',
        'url': 'https://www.facebook.com/video.php?v=274175099429670',
        'info_dict': {
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
            'uploader': 'Asif Nawab Butt',
        },
        'expected_warnings': [
            'title'
        ]
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
        'only_matching': True,
    }]

    def _login(self):
        """Log the session in to Facebook if credentials are configured.

        Best-effort: missing credentials, a rejected login or a network
        error only emit a warning (or an ExtractorError for an explicit
        login-error message from Facebook) — extraction proceeds
        unauthenticated otherwise.
        """
        (useremail, password) = self._get_login_info()
        if useremail is None:
            # No credentials configured; continue anonymously.
            return

        login_page_req = sanitized_Request(self._LOGIN_URL)
        # Force English pages so the regexes below match reliably.
        self._set_cookie('facebook.com', 'locale', 'en_US')
        login_page = self._download_webpage(login_page_req, None,
                                            note='Downloading login page',
                                            errnote='Unable to download login page')
        # Hidden anti-CSRF tokens that must be echoed back in the form.
        lsd = self._search_regex(
            r'<input type="hidden" name="lsd" value="([^"]*)"',
            login_page, 'lsd')
        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')

        login_form = {
            'email': useremail,
            'pass': password,
            'lsd': lsd,
            'lgnrnd': lgnrnd,
            'next': 'http://facebook.com/home.php',
            'default_persistent': '0',
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
            login_results = self._download_webpage(request, None,
                                                   note='Logging in', errnote='unable to fetch login page')
            # If the login form is still present, the login was rejected.
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                error = self._html_search_regex(
                    r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
                    login_results, 'login error', default=None, group='error')
                if error:
                    raise ExtractorError('Unable to login: %s' % error, expected=True)
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return

            # Facebook may interpose a "save device" checkpoint page;
            # answer it with "don't save" to finish the login.
            fb_dtsg = self._search_regex(
                r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
            h = self._search_regex(
                r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)

            if not fb_dtsg or not h:
                # No checkpoint tokens found — assume login completed.
                return

            check_form = {
                'fb_dtsg': fb_dtsg,
                'h': h,
                'name_action_selected': 'dont_save',
            }
            check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            check_response = self._download_webpage(check_req, None,
                                                    note='Confirming login')
            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login, you have to log in in your browser and authorize the login.')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
            return

    def _real_initialize(self):
        # Attempt login once per session before any extraction.
        self._login()

    def _real_extract(self, url):
        """Extract video formats, title and uploader for a Facebook video.

        Raises ExtractorError when the page data cannot be parsed, when
        Facebook reports the video as unavailable, or when no playable
        formats are found.
        """
        video_id = self._match_id(url)
        # Normalize every accepted URL form to the canonical watch page.
        url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
        webpage = self._download_webpage(url, video_id)

        # The player parameters are embedded as a JSON array between two
        # fixed swf-setup JavaScript fragments.
        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
        if not m:
            # No player data: surface Facebook's own interstitial message
            # (e.g. removed/private video) if one is shown.
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError('Cannot parse data')
        data = dict(json.loads(m.group(1)))
        # 'params' is a URL-quoted JSON blob holding the actual video data.
        params_raw = compat_urllib_parse_unquote(data['params'])
        params = json.loads(params_raw)

        formats = []
        for format_id, f in params['video_data'].items():
            if not f or not isinstance(f, list):
                continue
            for quality in ('sd', 'hd'):
                for src_type in ('src', 'src_no_ratelimit'):
                    src = f[0].get('%s_%s' % (quality, src_type))
                    if src:
                        formats.append({
                            'format_id': '%s_%s_%s' % (format_id, quality, src_type),
                            'url': src,
                            # Deprioritize progressive downloads vs. other
                            # delivery methods.
                            'preference': -10 if format_id == 'progressive' else 0,
                        })
        if not formats:
            raise ExtractorError('Cannot find video formats')

        # Title fallback chain: page header -> photo caption (truncated to
        # 80 chars) -> generic "Facebook video #<id>" placeholder.
        video_title = self._html_search_regex(
            r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
            default=None)
        if not video_title:
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', default=None)
            video_title = limit_length(video_title, 80)
        if not video_title:
            video_title = 'Facebook video #%s' % video_id
        uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))

        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'uploader': uploader,
        }