from __future__ import unicode_literals

import re

from .common import InfoExtractor
class DramaFeverIE(InfoExtractor):
    """Extractor for single DramaFever episodes.

    Matches URLs of the form dramafever.com/drama/<series_id>/<episode_no>/.
    """
    IE_NAME = 'dramafever'
    _VALID_URL = r'^https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+/[0-9]+)/'
    _TEST = {
        'url': 'http://www.dramafever.com/drama/4512/1/Cooking_with_Shin/',
        'info_dict': {
            'id': '4512.1',
            'ext': 'flv',
            'title': 'Cooking with Shin 4512.1',
            'upload_date': '20140702',
            'description': 'Served at all special occasions and featured in the hit drama Heirs, Shin cooks Red Bean Rice.',
        },
    }

    def _real_extract(self, url):
        # URLs use "<series>/<episode>" but the API identifies episodes as
        # "<series>.<episode>", e.g. 4512/1 -> 4512.1.
        video_id = self._match_id(url).replace('/', '.')

        consumer_secret = self._get_consumer_secret(video_id)

        ep_json = self._download_json(
            'http://www.dramafever.com/amp/episode/feed.json?guid=%s' % video_id,
            video_id, note='Downloading episode metadata',
            errnote='Video may not be available for your location')['channel']['item']

        media_group = ep_json['media-group']
        title = media_group['media-title']
        description = media_group['media-description']
        thumbnail = media_group['media-thumbnail']['@attributes']['url']
        # Duration is API-supplied metadata; don't let a missing or malformed
        # value abort the whole extraction.
        try:
            duration = int(media_group['media-content'][0]['@attributes']['duration'])
        except (KeyError, IndexError, TypeError, ValueError):
            duration = None

        # pubDate starts with an ISO date ("YYYY-MM-DD ..."); upload_date is
        # the compact "YYYYMMDD" form.
        mobj = re.match(r'([0-9]{4})-([0-9]{2})-([0-9]{2})', ep_json['pubDate'])
        upload_date = ('%s%s%s' % mobj.groups()) if mobj is not None else None

        formats = []
        for vid_format in media_group['media-content']:
            src = vid_format['@attributes']['url']
            # Only f4m manifests are playable here; skip other media entries.
            if '.f4m' in src:
                formats.extend(self._extract_f4m_formats(src, video_id))
        self._sort_formats(formats)

        video_subtitles = self.extract_subtitles(video_id, consumer_secret)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'formats': formats,
            'subtitles': video_subtitles,
        }

    def _get_consumer_secret(self, video_id):
        """Scrape the API consumer secret ('cs') out of a bundled site script."""
        df_js = self._download_webpage(
            'http://www.dramafever.com/static/126960d/v2/js/plugins/jquery.threadedcomments.js',
            video_id)
        return self._search_regex(r"'cs': '([0-9a-zA-Z]+)'", df_js, 'cs')

    def _get_episodes(self, series_id, consumer_secret, episode_filter=None):
        """Fetch every episode dict of a series, paging through the API.

        episode_filter, when given, keeps only episodes for which it
        returns a truthy value.
        """
        _PAGE_SIZE = 60

        curr_page = 1
        # Assume at least one more page until the first response reports
        # the real page count via 'num_pages'.
        max_pages = curr_page + 1
        results = []
        while max_pages >= curr_page:
            page_url = (
                'http://www.dramafever.com/api/4/episode/series/?cs=%s&series_id=%s'
                '&page_size=%d&page_number=%d'
                % (consumer_secret, series_id, _PAGE_SIZE, curr_page))
            series = self._download_json(
                page_url, series_id,
                note='Downloading series json page #%d' % curr_page)
            max_pages = series['num_pages']
            results.extend(
                ep for ep in series['value']
                if episode_filter is None or episode_filter(ep))
            curr_page += 1
        return results

    def _get_subtitles(self, video_id, consumer_secret):
        """Return an English SRT subtitles dict for video_id, or None."""
        res = None
        info = self._get_episodes(
            video_id.split('.')[0], consumer_secret,
            episode_filter=lambda x: x['guid'] == video_id)

        # Exactly one matching episode with a non-empty subtitle file.
        if len(info) == 1 and info[0]['subfile'] != '':
            res = {'en': [{'url': info[0]['subfile'], 'ext': 'srt'}]}
        return res
class DramaFeverSeriesIE(DramaFeverIE):
    """Playlist extractor for complete DramaFever series.

    Matches URLs of the form dramafever.com/drama/<series_id>/<slug>/ and
    yields one playlist entry per episode.
    """
    IE_NAME = 'dramafever:series'
    _VALID_URL = r'^https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+)/\d*[a-zA-Z_][a-zA-Z0-9_]*/'
    _TESTS = [{
        'url': 'http://www.dramafever.com/drama/4512/Cooking_with_Shin/',
        'info_dict': {
            'id': '4512',
            'title': 'Cooking with Shin',
            'description': 'Professional chef and cooking instructor Shin Kim takes some of the delicious dishes featured in your favorite dramas and shows you how to make them right at home.',
        },
        'playlist_count': 4,
    }, {
        'url': 'http://www.dramafever.com/drama/124/IRIS/',
        'info_dict': {
            'id': '124',
            'title': 'IRIS',
            'description': 'Lee Byung Hun and Kim Tae Hee star in this powerhouse drama and ratings megahit of action, intrigue and romance.',
        },
        'playlist_count': 20,
    }]

    def _real_extract(self, url):
        series_id = self._match_id(url)
        consumer_secret = self._get_consumer_secret(series_id)

        series_json = self._download_json(
            'http://www.dramafever.com/api/4/series/query/?cs=%s&series_id=%s'
            % (consumer_secret, series_id),
            series_id, note='Downloading series metadata')['series'][series_id]

        title = series_json['name']
        description = series_json['description_short']

        # Episode URLs from the API are site-relative; delegate each one to
        # the single-episode extractor.
        episodes = self._get_episodes(series_id, consumer_secret)
        entries = [
            self.url_result(
                'http://www.dramafever.com%s' % ep['episode_url'],
                'DramaFever', ep['guid'])
            for ep in episodes]
        return self.playlist_result(entries, series_id, title, description)