1 from __future__ import unicode_literals
7 from .common import InfoExtractor
13 class BambuserIE(InfoExtractor):
# Extractor for a single Bambuser broadcast (URLs like bambuser.com/v/<digits>).
# NOTE(review): this chunk is a fragmented numbered listing — several original
# lines (e.g. the '_TEST = {' framing, 'info_dict'/'params' keys, and the
# 'return {' / '}' around the result fields below) are not visible here, and
# indentation has been collapsed. Do not assume the gaps are empty.
15 _VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)'
# API key sent as a query parameter to the getVideo.json endpoint below.
16 _API_KEY = '005f64509e19a868399060af746a00aa'
# Test-fixture fragment (presumably part of a _TEST dict — the enclosing
# braces are among the lines missing from this chunk; verify against the
# full file).
19 'url': 'http://bambuser.com/v/4050584',
20 # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
21 # 'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
25 'title': 'Education engineering days - lightning talks',
27 'uploader': 'pixelversity',
28 'uploader_id': '344706',
31 # It doesn't respect the 'Range' header, it would download the whole video
32 # caused the travis builds to fail: https://travis-ci.org/rg3/youtube-dl/jobs/14493845#L59
33 'skip_download': True,
# Resolve the video id from the URL, then fetch metadata from the player API;
# the JSON response's 'result' key holds the payload used below.
37 def _real_extract(self, url):
38 video_id = self._match_id(url)
40 info = self._download_json(
41 'http://player-c.api.bambuser.com/getVideo.json?api_key=%s&vid=%s'
42 % (self._API_KEY, video_id), video_id)['result']
# Result-dict fields (presumably inside the info dict returned by
# _real_extract — the 'return {' line is not visible in this chunk).
# int() conversions suggest the API delivers these numeric fields as
# strings — TODO confirm against a live response.
46 'title': info['title'],
48 'thumbnail': info.get('preview'),
49 'duration': int(info['length']),
50 'view_count': int(info['views_total']),
51 'uploader': info['username'],
52 'uploader_id': info['owner']['uid'],
56 class BambuserChannelIE(InfoExtractor):
# Playlist extractor for all broadcasts of a Bambuser channel/user
# (URLs like bambuser.com/channel/<user>).
# NOTE(review): fragmented numbered listing — the _TEST framing, the
# initialisation of 'urls' and 'last_id', the loop-termination check, and
# the final playlist return are among the lines not visible in this chunk.
57 IE_NAME = 'bambuser:channel'
58 _VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
59 # The maximum number we can get with each request
# (the '_STEP = ...' assignment this comment refers to is not visible here;
# _STEP is used as the 'limit' parameter in the request URL below.)
62 'url': 'http://bambuser.com/channel/pixelversity',
64 'title': 'pixelversity',
66 'playlist_mincount': 60,
69 def _real_extract(self, url):
70 mobj = re.match(self._VALID_URL, url)
71 user = mobj.group('user')
# Page through the channel's broadcasts indefinitely; each request asks for
# up to _STEP videos older than the last id seen on the previous page.
# (The break condition ending this loop is not visible in this chunk.)
74 for i in itertools.count(1):
76 'http://bambuser.com/xhr-api/index.php?username={user}'
77 '&sort=created&access_mode=0%2C1%2C2&limit={count}'
78 '&method=broadcast&format=json&vid_older_than={last}'
79 ).format(user=user, count=self._STEP, last=last_id)
80 req = compat_urllib_request.Request(req_url)
81 # Without setting this header, we wouldn't get any result
82 req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
83 data = self._download_json(
84 req, user, 'Downloading page %d' % i)
85 results = data['result']
# Remember the oldest vid of this page for the next request's
# 'vid_older_than' parameter, and collect one url_result entry per
# broadcast page, delegating each to the Bambuser single-video extractor.
88 last_id = results[-1]['vid']
89 urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)