# youtube_dl/extractor/facebook.py (from the youtube-dl project)
# Change being viewed: "[facebook] Support alternative webpage form"
1 from __future__ import unicode_literals
2
3 import json
4 import re
5 import socket
6
7 from .common import InfoExtractor
8 from ..compat import (
9     compat_http_client,
10     compat_urllib_error,
11     compat_urllib_parse_unquote,
12 )
13 from ..utils import (
14     error_to_compat_str,
15     ExtractorError,
16     limit_length,
17     sanitized_Request,
18     urlencode_postdata,
19     get_element_by_id,
20     clean_html,
21 )
22
23
class FacebookIE(InfoExtractor):
    """Extractor for individual Facebook videos.

    Matches the various facebook.com video page URL shapes
    (video.php / photo.php / video/embed / <user>/videos/...) as well as
    the internal ``facebook:<id>`` pseudo-scheme that FacebookPostIE uses
    to delegate individual videos back to this extractor.
    """
    _VALID_URL = r'''(?x)
                (?:
                    https?://
                        (?:\w+\.)?facebook\.com/
                        (?:[^#]*?\#!/)?
                        (?:
                            (?:
                                video/video\.php|
                                photo\.php|
                                video\.php|
                                video/embed
                            )\?(?:.*?)(?:v|video_id)=|
                            [^/]+/videos/(?:[^/]+/)?
                        )|
                    facebook:
                )
                (?P<id>[0-9]+)
                '''
    # Login/checkpoint endpoints; _fb_noscript=1 asks for the non-JS variant.
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    # Machine name looked up in the user's .netrc for credentials.
    _NETRC_MACHINE = 'facebook'
    IE_NAME = 'facebook'
    _TESTS = [{
        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
        'md5': '6a40d33c0eccbb1af76cf0485a052659',
        'info_dict': {
            'id': '637842556329505',
            'ext': 'mp4',
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
            'uploader': 'Tennis on Facebook',
        }
    }, {
        'note': 'Video without discernible title',
        'url': 'https://www.facebook.com/video.php?v=274175099429670',
        'info_dict': {
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
            'uploader': 'Asif Nawab Butt',
        },
        'expected_warnings': [
            'title'
        ]
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
        'only_matching': True,
    }, {
        'url': 'facebook:544765982287235',
        'only_matching': True,
    }]

    def _login(self):
        """Log in to Facebook with the user's stored credentials.

        Best-effort: returns silently when no credentials are configured or
        when a non-fatal problem occurs (only a warning is reported).
        Raises ExtractorError only when Facebook returns an explicit login
        error message.
        """
        (useremail, password) = self._get_login_info()
        if useremail is None:
            # No credentials configured; proceed unauthenticated.
            return

        login_page_req = sanitized_Request(self._LOGIN_URL)
        # Force English pages so the regexes below match reliably.
        self._set_cookie('facebook.com', 'locale', 'en_US')
        login_page = self._download_webpage(login_page_req, None,
                                            note='Downloading login page',
                                            errnote='Unable to download login page')
        # Hidden anti-CSRF tokens embedded in the login form.
        lsd = self._search_regex(
            r'<input type="hidden" name="lsd" value="([^"]*)"',
            login_page, 'lsd')
        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')

        login_form = {
            'email': useremail,
            'pass': password,
            'lsd': lsd,
            'lgnrnd': lgnrnd,
            'next': 'http://facebook.com/home.php',
            'default_persistent': '0',
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
            login_results = self._download_webpage(request, None,
                                                   note='Logging in', errnote='unable to fetch login page')
            # A login form in the response means the attempt failed.
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                error = self._html_search_regex(
                    r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
                    login_results, 'login error', default=None, group='error')
                if error:
                    raise ExtractorError('Unable to login: %s' % error, expected=True)
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return

            # Tokens needed to answer the "save device?" checkpoint page.
            fb_dtsg = self._search_regex(
                r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
            h = self._search_regex(
                r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)

            if not fb_dtsg or not h:
                # No checkpoint form present; login is already complete.
                return

            check_form = {
                'fb_dtsg': fb_dtsg,
                'h': h,
                'name_action_selected': 'dont_save',
            }
            check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            check_response = self._download_webpage(check_req, None,
                                                    note='Confirming login')
            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            # Network-level failure: warn and continue unauthenticated.
            self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
            return

    def _real_initialize(self):
        # Called once before extraction; logs in if credentials exist.
        self._login()

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Normalize every accepted URL shape to the canonical video page.
        url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
        webpage = self._download_webpage(url, video_id)

        video_data = None

        # Primary form: the page embeds a JSON params blob between these two
        # literal swf-setup script fragments.
        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
        if m:
            data = dict(json.loads(m.group(1)))
            # The 'params' value is URL-encoded JSON containing 'video_data'.
            params_raw = compat_urllib_parse_unquote(data['params'])
            video_data = json.loads(params_raw)['video_data']

        def video_data_list2dict(video_data):
            # Group a flat list of stream dicts by their 'stream_type' so the
            # alternative form below matches the primary form's structure.
            ret = {}
            for item in video_data:
                format_id = item['stream_type']
                ret.setdefault(format_id, []).append(item)
            return ret

        if not video_data:
            # Alternative webpage form: video config is passed to
            # handleServerJS(...) as a JS object literal.
            server_js_data = self._parse_json(self._search_regex(
                r'handleServerJS\(({.+})\);', webpage, 'server js data'), video_id)
            for item in server_js_data['instances']:
                if item[1][0] == 'VideoConfig':
                    video_data = video_data_list2dict(item[2][0]['videoData'])
                    break

        if not video_data:
            # No video data found; surface Facebook's interstitial message
            # (e.g. region/login restrictions) when one is present.
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError('Cannot parse data')

        formats = []
        for format_id, f in video_data.items():
            if not f or not isinstance(f, list):
                continue
            # NOTE: only the first entry of each stream_type list is inspected.
            for quality in ('sd', 'hd'):
                for src_type in ('src', 'src_no_ratelimit'):
                    src = f[0].get('%s_%s' % (quality, src_type))
                    if src:
                        formats.append({
                            'format_id': '%s_%s_%s' % (format_id, quality, src_type),
                            'url': src,
                            # Rank 'progressive' streams below the others.
                            'preference': -10 if format_id == 'progressive' else 0,
                        })
        if not formats:
            raise ExtractorError('Cannot find video formats')

        # Title fallbacks: page header -> photo caption (truncated) ->
        # generic "#<id>" placeholder.
        video_title = self._html_search_regex(
            r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
            default=None)
        if not video_title:
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', default=None)
            # presumably limit_length tolerates None here — TODO confirm in utils
            video_title = limit_length(video_title, 80)
        if not video_title:
            video_title = 'Facebook video #%s' % video_id
        uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))

        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'uploader': uploader,
        }
221
222
class FacebookPostIE(InfoExtractor):
    """Extractor for Facebook post pages.

    A post may embed several videos; each embedded video id is handed off
    to FacebookIE via the ``facebook:<id>`` pseudo-scheme, and the results
    are returned together as a playlist keyed by the post id.
    """
    IE_NAME = 'facebook:post'
    _VALID_URL = r'https?://(?:\w+\.)?facebook\.com/[^/]+/posts/(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570',
        'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6',
        'info_dict': {
            'id': '544765982287235',
            'ext': 'mp4',
            'title': '"What are you doing running in the snow?"',
            'uploader': 'FailArmy',
        }
    }

    def _real_extract(self, url):
        post_id = self._match_id(url)
        webpage = self._download_webpage(url, post_id)

        # The page embeds a JS array of the post's video ids, e.g.
        # video_ids: [123, 456]; pull it out and parse it as JSON.
        ids_json = self._search_regex(
            r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])',
            webpage, 'video ids', group='ids')
        video_ids = self._parse_json(ids_json, post_id)

        # Delegate each embedded video to FacebookIE.
        entries = []
        for vid in video_ids:
            entries.append(
                self.url_result('facebook:%s' % vid, FacebookIE.ie_key()))

        return self.playlist_result(entries, post_id)