2 from __future__ import unicode_literals
8 from .common import InfoExtractor
11 compat_urllib_request,
class NiconicoIE(InfoExtractor):
    """Extractor for single videos on niconico (www.nicovideo.jp).

    Optionally logs in (credentials via ``_NETRC_MACHINE``) during
    ``_real_initialize``; ``_real_extract`` then resolves the real media
    URL either through the authenticated ``getflv`` API or the anonymous
    ``thumb_watch`` endpoint, and merges metadata from the ``getthumbinfo``
    XML API, the watch page HTML and its embedded watch-API JSON.

    NOTE(review): this view of the file is missing interleaved lines
    (e.g. the ``_TESTS = [{`` opener, dict/brace closers, an ``else:``
    branch header, several statement continuations and the final
    ``return {`` header). All visible code lines below are preserved
    unchanged; only comments were added.
    """

        'url': 'http://www.nicovideo.jp/watch/sm22312215',
        'md5': 'd1a75c0823e2f629128c43e1212760f9',
            'title': 'Big Buck Bunny',
            'uploader': 'takuya0301',
            'uploader_id': '2698420',
            'upload_date': '20131123',
            'timestamp': 1385182762,
            'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
        # File downloaded with and without credentials are different, so omit
        # the md5 checksum (it would not be stable across sessions)
        'url': 'http://www.nicovideo.jp/watch/nm14296458',
            'title': '【鏡音リン】Dance on media【オリジナル】take2!',
            'description': 'md5:689f066d74610b3b22e0f1739add0f58',
            'uploader_id': '18822557',
            'upload_date': '20110429',
            'timestamp': 1304065916,
        # video exists but is marked as "deleted"
        'url': 'http://www.nicovideo.jp/watch/sm10000',
            'ext': 'unknown_video',
            'description': 'deleted',
            'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
            'upload_date': '20071224',
            'timestamp': 1198527840,  # timestamp field has different value if logged in
        'url': 'http://www.nicovideo.jp/watch/so22543406',
            'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
            'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
            'timestamp': 1388851200,
            'upload_date': '20140104',
            'uploader': 'アニメロチャンネル',

    # Accepts optional "www." / "secure." hosts; the id is either bare
    # digits or a two-letter prefix plus digits (sm..., nm..., so...)
    _VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
    _NETRC_MACHINE = 'niconico'
    # Determine whether the downloader used authentication to download video
    _AUTHENTICATED = False

    def _real_initialize(self):
        # Runs once before any extraction; performs the optional login below.
        # NOTE(review): in this view the login code follows directly, but
        # lines are missing here (presumably a separate ``_login`` helper
        # and the ``login_form_strs = {`` opener — confirm against the
        # full file).

        (username, password) = self._get_login_info()
        # No authentication to be performed
            'password': password,
        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # chokes on unicode input
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8')
        request = compat_urllib_request.Request(
            'https://secure.nicovideo.jp/secure/login', login_data)
        login_results = self._download_webpage(
            request, None, note='Logging in', errnote='Unable to log in')
        # The login page echoes an error heading when credentials are bad
        if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
            self._downloader.report_warning('unable to log in: bad username or password')
        # Remember that subsequent requests carry an authenticated session
        self._AUTHENTICATED = True

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Get video webpage. We are not actually interested in it for normal
        # cases, but need the cookies in order to be able to download the
        # actual video file
        webpage, handle = self._download_webpage_handle(
            'http://www.nicovideo.jp/watch/' + video_id, video_id)
        # 'so' (channel) ids redirect to a different id; re-extract it from
        # the URL reached after redirects
        if video_id.startswith('so'):
            video_id = self._match_id(handle.geturl())

        # Public thumbinfo API: XML metadata (title, uploader, counters, ...)
        video_info = self._download_xml(
            'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
            note='Downloading video info page')

        if self._AUTHENTICATED:
            # Authenticated session: getflv hands back the media URL directly
            flv_info_webpage = self._download_webpage(
                'http://flapi.nicovideo.jp/api/getflv/' + video_id + '?as3=1',
                video_id, 'Downloading flv info')
            # Get external player info
            # NOTE(review): the following thumb_watch path is the anonymous
            # fallback — the ``else:`` introducing it is missing from this view
            ext_player_info = self._download_webpage(
                'http://ext.nicovideo.jp/thumb_watch/' + video_id, video_id)
            thumb_play_key = self._search_regex(
                r'\'thumbPlayKey\'\s*:\s*\'(.*?)\'', ext_player_info, 'thumbPlayKey')

            # POST the thumbPlayKey (form body lines missing from this view)
            flv_info_data = compat_urllib_parse.urlencode({
            flv_info_request = compat_urllib_request.Request(
                'http://ext.nicovideo.jp/thumb_watch', flv_info_data,
                {'Content-Type': 'application/x-www-form-urlencoded'})
            flv_info_webpage = self._download_webpage(
                flv_info_request, video_id,
                note='Downloading flv info', errnote='Unable to download flv info')

        # Both endpoints answer with a urlencoded query string
        flv_info = compat_urlparse.parse_qs(flv_info_webpage)
        if 'url' not in flv_info:
            if 'deleted' in flv_info:
                raise ExtractorError('The video has been deleted.',
                raise ExtractorError('Unable to find video URL')

        video_real_url = flv_info['url'][0]

        # Start extracting information
        title = xpath_text(video_info, './/title')
            # Fallbacks when the XML API gave no title (guarding ``if not
            # title:`` lines are missing from this view)
            title = self._og_search_title(webpage, default=None)
            title = self._html_search_regex(
                r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
                webpage, 'video title')

        # The watch page embeds extra details as JSON; used as a last-resort
        # fallback for several fields below
        watch_api_data_string = self._html_search_regex(
            r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
            webpage, 'watch api data', default=None)
        watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
        video_detail = watch_api_data.get('videoDetail', {})

        extension = xpath_text(video_info, './/movie_type')
            # Fallback: derive the extension from the resolved media URL
            extension = determine_ext(video_real_url)

            # thumbnail: first XML API, then <meta image>, then watch-API JSON
            xpath_text(video_info, './/thumbnail_url') or
            self._html_search_meta('image', webpage, 'thumbnail', default=None) or
            video_detail.get('thumbnail'))

        description = xpath_text(video_info, './/description')

        timestamp = parse_iso8601(xpath_text(video_info, './/first_retrieve'))
            # datePublished meta lacks seconds in its offset; normalize before parsing
            match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
            timestamp = parse_iso8601(match.replace('+', ':00+'))
        if not timestamp and video_detail.get('postedAt'):
            # postedAt is 'YYYY/MM/DD hh:mm:ss' in JST (UTC+9)
            timestamp = parse_iso8601(
                video_detail['postedAt'].replace('/', '-'),
                delimiter=' ', timezone=datetime.timedelta(hours=9))

        view_count = int_or_none(xpath_text(video_info, './/view_counter'))
            # Fallback: scrape the English watch page counter (comma-grouped)
            match = self._html_search_regex(
                r'>Views: <strong[^>]*>([^<]+)</strong>',
                webpage, 'view count', default=None)
                view_count = int_or_none(match.replace(',', ''))
        view_count = view_count or video_detail.get('viewCount')

        comment_count = int_or_none(xpath_text(video_info, './/comment_num'))
        if not comment_count:
            match = self._html_search_regex(
                r'>Comments: <strong[^>]*>([^<]+)</strong>',
                webpage, 'comment count', default=None)
                comment_count = int_or_none(match.replace(',', ''))
        comment_count = comment_count or video_detail.get('commentCount')

        duration = (parse_duration(
            xpath_text(video_info, './/length') or
            self._html_search_meta(
                'video:duration', webpage, 'video duration', default=None)) or
            video_detail.get('length'))

        webpage_url = xpath_text(video_info, './/watch_url') or url

        # Uploader: channel (ch_*) takes precedence over user (user_*) fields
        if video_info.find('.//ch_id') is not None:
            uploader_id = video_info.find('.//ch_id').text
            uploader = video_info.find('.//ch_name').text
        elif video_info.find('.//user_id') is not None:
            uploader_id = video_info.find('.//user_id').text
            uploader = video_info.find('.//user_nickname').text
            uploader_id = uploader = None

            # Final info dict (the ``return {`` header is missing from this view)
            'url': video_real_url,
            # URLs ending in 'low' are the reduced-bitrate "economy" variant
            'format_id': 'economy' if video_real_url.endswith('low') else 'normal',
            'thumbnail': thumbnail,
            'description': description,
            'uploader': uploader,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'comment_count': comment_count,
            'duration': duration,
            'webpage_url': webpage_url,
class NiconicoPlaylistIE(InfoExtractor):
    """Extractor for niconico "mylist" playlists.

    Scrapes the JSON array preloaded into the mylist page and emits one
    url-type entry per video, each delegated to NiconicoIE.

    NOTE(review): lines are missing from this view (test ``info_dict``
    braces, a regex-call continuation, the ``return {`` header and the
    tail of the return dict); visible code is preserved unchanged.
    """
    _VALID_URL = r'https?://www\.nicovideo\.jp/mylist/(?P<id>\d+)'

        'url': 'http://www.nicovideo.jp/mylist/27411728',
            'title': 'AKB48のオールナイトニッポン',
        'playlist_mincount': 225,

    def _real_extract(self, url):
        list_id = self._match_id(url)
        webpage = self._download_webpage(url, list_id)

        # Items are preloaded as a JSON array passed to Mylist.preload(...)
        entries_json = self._search_regex(r'Mylist\.preload\(\d+, (\[.*\])\);',
        entries = json.loads(entries_json)
            # Build bare url results so NiconicoIE handles each video
            'ie_key': NiconicoIE.ie_key(),
            'url': ('http://www.nicovideo.jp/watch/%s' %
                    entry['item_data']['video_id']),
        } for entry in entries]

        # Playlist title comes from an inline `name: "..."` JS fragment
        'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),