· Import from compat what comes from compat. Yes, some names are also available in utils, but that is an implementation detail (see the import sketch after this list).
· Use _match_id consistently whenever possible (sketched below).
· Fix some outdated tests.
· Use consistent _VALID_URL patterns: always match the whole protocol, and no ^ at the start is required, since re.match() already anchors at the beginning of the string.
· Use modern test definitions, with 'id' and 'ext' inside 'info_dict' rather than the legacy top-level 'file' key (see the extractor sketch below).
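As a rough illustration of the compat/utils split (a sketch only; the specific names below vary per file and are not tied to any one hunk in this diff), the intended import layout inside an extractor or downloader module is:

    # Python 2/3 compatibility aliases come from youtube_dl/compat.py ...
    from ..compat import (
        compat_urllib_parse,
        compat_urllib_request,
        compat_urlparse,
    )
    # ... while youtube-dl's own helpers stay in youtube_dl/utils.py.
    from ..utils import (
        ExtractorError,
        int_or_none,
    )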
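The _match_id change is mechanical; a minimal before/after sketch of what the hunks below do:

    # Before: every extractor repeated the same two lines of boilerplate.
    mobj = re.match(self._VALID_URL, url)
    video_id = mobj.group('id')

    # After: InfoExtractor._match_id() performs the same match and group
    # lookup, provided _VALID_URL captures the ID as (?P<id>...).
    video_id = self._match_id(url)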
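The URL and test conventions combined in one hedged sketch (FooIE, example.com and the md5/title values are invented for illustration, not taken from this diff):

    from .common import InfoExtractor


    class FooIE(InfoExtractor):
        # re.match() anchors at the start of the string, so no leading '^'
        # is needed, and the whole scheme is matched ('https?://').
        _VALID_URL = r'https?://(?:www\.)?example\.com/video/(?P<id>[0-9]+)'

        # Modern test definition: 'id' and 'ext' go inside 'info_dict'
        # instead of the legacy top-level 'file' key.
        _TEST = {
            'url': 'http://www.example.com/video/12345',
            'md5': '0123456789abcdef0123456789abcdef',
            'info_dict': {
                'id': '12345',
                'ext': 'mp4',
                'title': 'Sample title',
            },
        }

        def _real_extract(self, url):
            video_id = self._match_id(url)
            webpage = self._download_webpage(url, video_id)
            video_url = self._search_regex(
                r'file=(http[^\'"&]*)', webpage, 'video URL')
            return {
                'id': video_id,
                'url': video_url,
                'title': self._og_search_title(webpage),
            }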
120 files changed:
+from ..compat import compat_str
encodeFilename,
format_bytes,
timeconvert,
from .common import FileDownloader
from .http import HttpFD
+from ..compat import (
+ compat_urlparse,
+)
from ..utils import (
struct_pack,
struct_unpack,
format_bytes,
encodeFilename,
sanitize_open,
from ..postprocessor.ffmpeg import FFmpegPostProcessor
from .common import FileDownloader
compat_urlparse,
compat_urllib_request,
+)
+from ..utils import (
check_executable,
encodeFilename,
)
import time
from .common import FileDownloader
compat_urllib_request,
compat_urllib_error,
+)
+from ..utils import (
encodeFilename,
sanitize_open,
format_bytes,
import time
from .common import FileDownloader
+from ..compat import compat_str
from ..utils import (
check_executable,
encodeFilename,
format_bytes,
get_exe_version,
import json
from .common import InfoExtractor
+from ..compat import compat_str
'format_id': format_id,
'quality': quality(format_id),
'url': v,
- 'ext': determine_ext(v),
self._sort_formats(formats)
return {
'title': title,
'entries': entries,
}
import json
from .common import InfoExtractor
+from ..compat import compat_urlparse
import re
from .common import InfoExtractor
+from ..compat import compat_urllib_parse
determine_ext,
ExtractorError,
)
import itertools
from .common import InfoExtractor
import re
from .common import InfoExtractor
compat_str,
compat_urlparse,
+)
+from ..utils import (
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..compat import compat_urllib_parse
xpath_text,
xpath_with_ns,
int_or_none,
import re
from .common import InfoExtractor
+from ..compat import compat_parse_qs
ExtractorError,
int_or_none,
unified_strdate,
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_code = self._search_regex(
r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code')
import xml.etree.ElementTree
from .common import InfoExtractor
-from ..utils import (
- compat_urllib_parse,
- find_xpath_attr,
- fix_xml_ampersands,
- compat_urlparse,
- compat_str,
- compat_urllib_request,
+ compat_str,
+ compat_urllib_parse,
compat_urllib_parse_urlparse,
+ compat_urllib_request,
+ compat_urlparse,
+)
+from ..utils import (
determine_ext,
ExtractorError,
+ find_xpath_attr,
+ fix_xml_ampersands,
import re
from .common import InfoExtractor
compat_urllib_request,
compat_urllib_parse,
compat_urllib_parse_urlparse,
+)
+from ..utils import (
import re
from .mtv import MTVServicesInfoExtractor
compat_str,
compat_urllib_parse,
+)
+from ..utils import (
ExtractorError,
float_or_none,
unified_strdate,
import json
from .common import InfoExtractor
compat_urllib_parse_urlparse,
compat_urlparse,
)
+from ..utils import (
+ orderedSet,
+)
class CondeNastIE(InfoExtractor):
from hashlib import sha1
from math import pow, sqrt, floor
from .subtitles import SubtitlesInfoExtractor
-from ..utils import (
- ExtractorError,
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
+ ExtractorError,
bytes_to_intlist,
intlist_to_bytes,
unified_strdate,
from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
-from ..utils import (
- compat_urllib_request,
+ compat_urllib_request,
+)
+from ..utils import (
+ ExtractorError,
+ int_or_none,
- int_or_none,
- ExtractorError,
import re
from .common import InfoExtractor
from __future__ import unicode_literals
-import re
-
-from ..utils import (
compat_urllib_parse,
)
from .common import InfoExtractor
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- video_url = self._search_regex(r'(?:file|source)=(http[^\'"&]*)',
- webpage, 'video URL')
+ video_url = self._search_regex(
+ r'(?:file|source)=(http[^\'"&]*)', webpage, 'video URL')
final_url = compat_urllib_parse.unquote(video_url)
uploader = self._html_search_meta('uploader', webpage)
title = self._og_search_title(webpage).replace(' | eHow', '')
import re
from .common import InfoExtractor
import re
from .common import InfoExtractor
+)
+from ..utils import (
import re
from .common import InfoExtractor
+)
+from ..utils import (
import re
from .common import InfoExtractor
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urllib_parse,
+)
+from ..utils import (
str_to_int,
)
class ExtremeTubeIE(InfoExtractor):
- _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>extremetube\.com/.*?video/.+?(?P<videoid>[0-9]+))(?:[/?&]|$)'
+ _VALID_URL = r'https?://(?:www\.)?(?P<url>extremetube\.com/.*?video/.+?(?P<id>[0-9]+))(?:[/?&]|$)'
_TESTS = [{
'url': 'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431',
'md5': '1fb9228f5e3332ec8c057d6ac36f33e0',
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('videoid')
+ video_id = mobj.group('id')
url = 'http://www.' + mobj.group('url')
req = compat_urllib_request.Request(url)
#! -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
compat_urllib_request,
compat_urlparse,
)
+from ..utils import (
+ ExtractorError,
+)
class FC2IE(InfoExtractor):
- _VALID_URL = r'^http://video\.fc2\.com/((?P<lang>[^/]+)/)?content/(?P<id>[^/]+)'
+ _VALID_URL = r'^http://video\.fc2\.com/(?:[^/]+/)?content/(?P<id>[^/]+)'
IE_NAME = 'fc2'
_TEST = {
'url': 'http://video.fc2.com/en/content/20121103kUan1KHs',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
self._downloader.cookiejar.clear_session_cookies() # must clear
import re
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
compat_urllib_parse,
compat_urllib_request,
)
+from ..utils import (
+ ExtractorError,
+)
class FiredriveIE(InfoExtractor):
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
url = 'http://firedrive.com/file/%s' % video_id
webpage = self._download_webpage(url, video_id)
if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
import re
from .common import InfoExtractor
- unified_strdate,
- str_to_int,
- parse_duration,
+)
+from ..utils import (
+ parse_duration,
+ str_to_int,
+ unified_strdate,
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
-
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
webpage_url = 'http://www.4tube.com/videos/' + video_id
webpage = self._download_webpage(webpage_url, video_id)
import re
from .common import InfoExtractor
compat_parse_qs,
compat_urlparse,
)
import json
from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse_urlparse,
+)
+from ..utils import (
- parse_duration,
- compat_urllib_parse_urlparse,
import json
from .common import InfoExtractor
compat_urllib_parse,
compat_urlparse,
+)
+from ..utils import (
import re
from .common import InfoExtractor
compat_urllib_parse,
compat_urllib_request,
)
from __future__ import unicode_literals
from .common import InfoExtractor
+)
+from ..utils import (
import re
from .common import SearchInfoExtractor
import re
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
- determine_ext,
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
+ ExtractorError,
formats = [{
'format_id': 'sd',
'url': video_url,
- 'ext': determine_ext(video_url),
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_request,
+)
from ..utils import (
ExtractorError,
int_or_none,
urlencode_postdata,
)
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
url = 'http://hostingbulk.com/{0:}.html'.format(video_id)
# Custom request with cookie to set language to English, so our file
from __future__ import unicode_literals
import json
import time
from .common import InfoExtractor
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
ExtractorError,
)
class HypemIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?hypem\.com/track/([^/]+)/([^/]+)'
+ _VALID_URL = r'http://(?:www\.)?hypem\.com/track/(?P<id>[^/]+)/'
_TEST = {
'url': 'http://hypem.com/track/1v6ga/BODYWORK+-+TAME',
'md5': 'b9cc91b5af8995e9f0c1cee04c575828',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- track_id = mobj.group(1)
+ track_id = self._match_id(url)
data = {'ax': 1, 'ts': time.time()}
data_encoded = compat_urllib_parse.urlencode(data)
import json
from .common import InfoExtractor
from __future__ import unicode_literals
import base64
from .common import InfoExtractor
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
import re
from .common import InfoExtractor
compat_urlparse,
compat_urllib_parse,
+)
+from ..utils import (
from math import floor
from .common import InfoExtractor
+)
+from ..utils import (
import json
from .common import InfoExtractor
+)
+from ..utils import (
import re
from .common import InfoExtractor
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urllib_parse,
class KeezMoviesIE(InfoExtractor):
- _VALID_URL = r'^https?://(?:www\.)?keezmovies\.com/video/.+?(?P<videoid>[0-9]+)(?:[/?&]|$)'
+ _VALID_URL = r'https?://(?:www\.)?keezmovies\.com/video/.+?(?P<id>[0-9]+)(?:[/?&]|$)'
_TEST = {
'url': 'http://www.keezmovies.com/video/petite-asian-lady-mai-playing-in-bathtub-1214711',
'file': '1214711.mp4',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('videoid')
+ video_id = self._match_id(url)
req = compat_urllib_request.Request(url)
req.add_header('Cookie', 'age_verified=1')
import json
from .common import InfoExtractor
compat_str,
compat_urllib_parse_urlparse,
compat_urlparse,
+)
+from ..utils import (
ExtractorError,
find_xpath_attr,
int_or_none,
from .subtitles import SubtitlesInfoExtractor
from .common import InfoExtractor
+from ..compat import (
+ compat_str,
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
ExtractorError,
int_or_none,
from __future__ import unicode_literals
from .common import InfoExtractor
compat_urllib_parse,
)
class MalemotionIE(InfoExtractor):
- _VALID_URL = r'^(?:https?://)?malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)'
+ _VALID_URL = r'https?://malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)'
- 'url': 'http://malemotion.com/video/bien-dur.10ew',
- 'file': '10ew.mp4',
- 'md5': 'b3cc49f953b107e4a363cdff07d100ce',
+ 'url': 'http://malemotion.com/video/bete-de-concours.ltc',
+ 'md5': '3013e53a0afbde2878bc39998c33e8a5',
- "title": "Bien dur",
- "age_limit": 18,
+ 'id': 'ltc',
+ 'ext': 'mp4',
+ 'title': 'Bête de Concours',
+ 'age_limit': 18,
- 'skip': 'This video has been deleted.'
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group("id")
-
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- self.report_extraction(video_id)
-
- # Extract video URL
- video_url = compat_urllib_parse.unquote(
- self._search_regex(r'<source type="video/mp4" src="(.+?)"', webpage, 'video URL'))
-
- # Extract title
+ video_url = compat_urllib_parse.unquote(self._search_regex(
+ r'<source type="video/mp4" src="(.+?)"', webpage, 'video URL'))
video_title = self._html_search_regex(
r'<title>(.*?)</title', webpage, 'title')
-
- # Extract video thumbnail
video_thumbnail = self._search_regex(
r'<video .+?poster="(.+?)"', webpage, 'thumbnail', fatal=False)
'format_id': 'mp4',
'preference': 1,
}]
+ self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
- 'uploader': None,
- 'upload_date': None,
'title': video_title,
'thumbnail': video_thumbnail,
import re
from .common import InfoExtractor
compat_parse_qs,
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
from .common import InfoExtractor
from .youtube import YoutubeIE
+)
+from ..utils import (
clean_html,
ExtractorError,
get_element_by_id,
from __future__ import unicode_literals
import json
from .common import InfoExtractor
compat_urllib_parse,
compat_urlparse,
+)
+from ..utils import (
get_element_by_attribute,
parse_duration,
strip_jsonp,
class MiTeleIE(InfoExtractor):
IE_NAME = 'mitele.es'
- _VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<episode>[^/]+)/'
+ _VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<id>[^/]+)/'
_TEST = {
'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- episode = mobj.group('episode')
+ episode = self._match_id(url)
webpage = self._download_webpage(url, episode)
embed_data_json = self._search_regex(
- r'MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data',
- flags=re.DOTALL
+ r'(?s)MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data',
).replace('\'', '"')
embed_data = json.loads(embed_data_json)
import re
from .common import InfoExtractor
+)
+from ..utils import (
ExtractorError,
HEADRequest,
int_or_none,
import re
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
+ ExtractorError,
import re
from .common import InfoExtractor
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urllib_parse,
class MofosexIE(InfoExtractor):
- _VALID_URL = r'^https?://(?:www\.)?(?P<url>mofosex\.com/videos/(?P<videoid>[0-9]+)/.*?\.html)'
+ _VALID_URL = r'https?://(?:www\.)?(?P<url>mofosex\.com/videos/(?P<id>[0-9]+)/.*?\.html)'
_TEST = {
'url': 'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html',
'md5': '1b2eb47ac33cc75d4a80e3026b613c5a',
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('videoid')
+ video_id = mobj.group('id')
url = 'http://www.' + mobj.group('url')
req = compat_urllib_request.Request(url)
import re
from .common import InfoExtractor
compat_urllib_parse,
compat_urllib_request,
)
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
orig_webpage = self._download_webpage(url, video_id)
fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
data = dict(fields)
import time
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
compat_urllib_request,
compat_urllib_parse,
)
+from ..utils import (
+ ExtractorError,
+)
class MooshareIE(InfoExtractor):
]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
page = self._download_webpage(url, video_id, 'Downloading page')
if re.search(r'>Video Not Found or Deleted<', page) is not None:
import hashlib
import json
import time
from .common import InfoExtractor
compat_parse_qs,
compat_str,
+)
+from ..utils import (
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- display_id = mobj.group('id')
-
+ display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
flashvars_code = self._html_search_regex(
r'<embed id="player".*?flashvars="([^"]+)"', webpage, 'flashvars')
flashvars = compat_parse_qs(flashvars_code)
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_str,
+)
from ..utils import (
ExtractorError,
import re
from .common import InfoExtractor
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
import os.path
from .common import InfoExtractor
compat_urllib_parse_urlparse,
+)
+from ..utils import (
import re
from .common import InfoExtractor
+)
+from ..utils import (
ExtractorError,
clean_html,
)
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"',
webpage)
if m_id is None:
import json
from .common import InfoExtractor
+)
+from ..utils import (
ExtractorError,
find_xpath_attr,
)
from __future__ import unicode_literals
from .common import InfoExtractor
compat_urllib_request,
compat_urllib_parse,
)
class NFBIE(InfoExtractor):
IE_NAME = 'nfb'
IE_DESC = 'National Film Board of Canada'
- _VALID_URL = r'https?://(?:www\.)?(nfb|onf)\.ca/film/(?P<id>[\da-z_-]+)'
+ _VALID_URL = r'https?://(?:www\.)?(?:nfb|onf)\.ca/film/(?P<id>[\da-z_-]+)'
_TEST = {
'url': 'https://www.nfb.ca/film/qallunaat_why_white_people_are_funny',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
- page = self._download_webpage('https://www.nfb.ca/film/%s' % video_id, video_id, 'Downloading film page')
+ video_id = self._match_id(url)
+ page = self._download_webpage(
+ 'https://www.nfb.ca/film/%s' % video_id, video_id,
+ 'Downloading film page')
uploader_id = self._html_search_regex(r'<a class="director-link" href="/explore-all-directors/([^/]+)/"',
page, 'director id', fatal=False)
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse_urlparse,
+)
from ..utils import (
ExtractorError,
- compat_urllib_parse_urlparse,
int_or_none,
remove_end,
)
import json
from .common import InfoExtractor
compat_urllib_parse,
compat_urllib_request,
compat_urlparse,
- unified_strdate,
- parse_duration,
- int_or_none,
+)
+from ..utils import (
+ int_or_none,
+ parse_duration,
+ unified_strdate,
import hashlib
from .common import InfoExtractor
-from ..utils import (
- compat_urllib_request,
+from ..compat import (
+ compat_str,
+ compat_urllib_request,
+)
+from ..utils import (
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_request,
+)
from ..utils import (
ExtractorError,
urlencode_postdata,
xpath_text,
xpath_with_ns,
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
fields = {
'id': video_id,
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_urlparse,
+)
from ..utils import (
ExtractorError,
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_request,
+)
from ..utils import (
parse_duration,
unified_strdate,
)
class NuvidIE(InfoExtractor):
- _VALID_URL = r'^https?://(?:www|m)\.nuvid\.com/video/(?P<id>[0-9]+)'
+ _VALID_URL = r'https?://(?:www|m)\.nuvid\.com/video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://m.nuvid.com/video/1310741/',
'md5': 'eab207b7ac4fccfb4e23c86201f11277',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
import re
from .common import InfoExtractor
-from ..utils import compat_urllib_parse
+from ..compat import compat_urllib_parse
class PhotobucketIE(InfoExtractor):
_VALID_URL = r'http://(?:[a-z0-9]+\.)?photobucket\.com/.*(([\?\&]current=)|_)(?P<id>.*)\.(?P<ext>(flv)|(mp4))'
_TEST = {
'url': 'http://media.photobucket.com/user/rachaneronas/media/TiredofLinkBuildingTryBacklinkMyDomaincom_zpsc0c3b9fa.mp4.html?filters[term]=search&filters[primary]=videos&filters[secondary]=images&sort=1&o=0',
- 'file': 'zpsc0c3b9fa.mp4',
'md5': '7dabfb92b0a31f6c16cebc0f8e60ff99',
'info_dict': {
+ 'id': 'zpsc0c3b9fa',
+ 'ext': 'mp4',
'timestamp': 1367669341,
'upload_date': '20130504',
'uploader': 'rachaneronas',
import os.path
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
compat_urllib_parse,
compat_urllib_request,
)
+from ..utils import (
+ ExtractorError,
+)
class PlayedIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
orig_webpage = self._download_webpage(url, video_id)
m_error = re.search(
import re
from .common import InfoExtractor
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse,
+)
)
class PlayvidIE(InfoExtractor):
- _VALID_URL = r'^https?://www\.playvid\.com/watch(\?v=|/)(?P<id>.+?)(?:#|$)'
+ _VALID_URL = r'https?://www\.playvid\.com/watch(\?v=|/)(?P<id>.+?)(?:#|$)'
- 'url': 'http://www.playvid.com/watch/agbDDi7WZTV',
- 'md5': '44930f8afa616efdf9482daf4fe53e1e',
+ 'url': 'http://www.playvid.com/watch/RnmBNgtrrJu',
+ 'md5': 'ffa2f6b2119af359f544388d8c01eb6c',
- 'title': 'Michelle Lewin in Miami Beach',
- 'duration': 240,
+ 'title': 'md5:9256d01c6317e3f703848b5906880dc8',
+ 'duration': 82,
'age_limit': 18,
}
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
m_error = re.search(
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse,
compat_urllib_parse_urlparse,
compat_urllib_request,
+)
+from ..utils import (
str_to_int,
)
from ..aes import (
class PornHubIE(InfoExtractor):
- _VALID_URL = r'^https?://(?:www\.)?pornhub\.com/view_video\.php\?viewkey=(?P<id>[0-9a-f]+)'
+ _VALID_URL = r'https?://(?:www\.)?pornhub\.com/view_video\.php\?viewkey=(?P<id>[0-9a-f]+)'
_TEST = {
'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
'md5': '882f488fa1f0026f023f33576004a2ed',
import re
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
- determine_ext,
compat_urllib_parse,
compat_urllib_request,
)
+from ..utils import (
+ determine_ext,
+ ExtractorError,
+)
class PromptFileIE(InfoExtractor):
from hashlib import sha1
from .common import InfoExtractor
+)
+from ..utils import (
import re
from .common import InfoExtractor
+)
+from ..utils import (
determine_ext,
int_or_none,
)
import re
from .subtitles import SubtitlesInfoExtractor
+from ..compat import (
+ compat_urllib_parse,
+)
from ..utils import (
parse_duration,
unified_strdate,
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_str,
+)
from ..utils import (
int_or_none,
parse_duration,
parse_iso8601,
unescapeHTML,
import itertools
from .common import InfoExtractor
+)
+from ..utils import (
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
video = self._download_json(
'http://rutube.ru/api/video/%s/?format=json' % video_id,
video_id, 'Downloading video JSON')
_PAGE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json'
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- movie_id = mobj.group('id')
+ movie_id = self._match_id(url)
movie = self._download_json(
self._MOVIE_TEMPLATE % movie_id, movie_id,
'Downloading movie JSON')
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
compat_parse_qs,
compat_urllib_request,
)
+from ..utils import (
+ ExtractorError,
+)
class ScreencastIE(InfoExtractor):
]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._html_search_regex(
import base64
from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse,
+ compat_urllib_request,
+)
from ..utils import (
ExtractorError,
- compat_urllib_request,
- compat_urllib_parse,
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
- page = self._download_webpage(url, video_id)
-
- if re.search(r'>File does not exist<', page) is not None:
- raise ExtractorError('Video %s does not exist' % video_id, expected=True)
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
- download_form = dict(re.findall(r'<input type="hidden" name="([^"]+)" value="([^"]*)"', page))
+ if '>File does not exist<' in webpage:
+ raise ExtractorError(
+ 'Video %s does not exist' % video_id, expected=True)
- request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(download_form))
+ download_form = dict(re.findall(
+ r'<input type="hidden" name="([^"]+)" value="([^"]*)"', webpage))
+ request = compat_urllib_request.Request(
+ url, compat_urllib_parse.urlencode(download_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
- video_page = self._download_webpage(request, video_id, 'Downloading video page')
+ video_page = self._download_webpage(
+ request, video_id, 'Downloading video page')
- video_url = self._html_search_regex(r'data-url="([^"]+)"', video_page, 'video URL')
- title = base64.b64decode(self._html_search_meta('full:title', page, 'title')).decode('utf-8')
- filesize = int_or_none(self._html_search_meta('full:size', page, 'file size', fatal=False))
+ video_url = self._html_search_regex(
+ r'data-url="([^"]+)"', video_page, 'video URL')
+ title = base64.b64decode(self._html_search_meta(
+ 'full:title', webpage, 'title')).decode('utf-8')
+ filesize = int_or_none(self._html_search_meta(
+ 'full:size', webpage, 'file size', fatal=False))
thumbnail = self._html_search_regex(
- r'data-poster="([^"]+)"', video_page, 'thumbnail', fatal=False, default=None)
+ r'data-poster="([^"]+)"', video_page, 'thumbnail', default=None)
import re
from .common import InfoExtractor
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
import re
from .common import InfoExtractor
compat_urllib_request,
compat_urllib_parse,
)
import json
from .common import InfoExtractor
+)
+from ..utils import (
import uuid
from .common import InfoExtractor
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
ExtractorError,
int_or_none,
unified_strdate,
# coding: utf-8
from __future__ import unicode_literals
-from ..utils import (
- ExtractorError,
+import re
+
+from ..compat import (
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
from .common import InfoExtractor
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
url = 'http://sockshare.com/file/%s' % video_id
webpage = self._download_webpage(url, video_id)
import itertools
from .common import InfoExtractor
compat_str,
compat_urlparse,
compat_urllib_parse,
+)
+from ..utils import (
ExtractorError,
int_or_none,
unified_strdate,
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse,
compat_urllib_parse_urlparse,
compat_urllib_request,
- compat_urllib_parse,
- unified_strdate,
+)
+from ..utils import (
)
from ..aes import aes_decrypt_text
import re
from .common import InfoExtractor
+)
+from ..utils import (
import time
from .common import InfoExtractor
compat_urllib_parse,
compat_urllib_request,
)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
-import re
-import json
-
from .common import InfoExtractor
-from ..compat import (
- compat_str,
-)
from ..utils import (
int_or_none,
)
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_request,
+)
float_or_none,
parse_iso8601,
)
from .subtitles import SubtitlesInfoExtractor
import json
from .common import InfoExtractor
+)
+from ..utils import (
determine_ext,
ExtractorError,
xpath_with_ns,
from .common import InfoExtractor
from .brightcove import BrightcoveIE
from .discovery import DiscoveryIE
-from ..utils import compat_urlparse
+from ..compat import compat_urlparse
class TlcIE(DiscoveryIE):
import re
from .common import InfoExtractor
compat_urllib_parse_urlparse,
compat_urllib_request,
+)
+from ..utils import (
int_or_none,
str_to_int,
)
from __future__ import unicode_literals
import base64
from .common import InfoExtractor
-from ..utils import compat_parse_qs
+from ..compat import compat_parse_qs
class TutvIE(InfoExtractor):
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
internal_id = self._search_regex(r'codVideo=([0-9]+)', webpage, 'internal video ID')
data_content = self._download_webpage(
import re
from .common import InfoExtractor
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
ExtractorError,
parse_iso8601,
)
import re
from .common import InfoExtractor
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
+)
+from ..utils import (
import re
from .common import InfoExtractor
# encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
compat_urllib_parse,
compat_urllib_request,
+)
+from ..utils import (
ExtractorError,
)
class Vbox7IE(InfoExtractor):
- _VALID_URL = r'http://(www\.)?vbox7\.com/play:(?P<id>[^/]+)'
+ _VALID_URL = r'http://(?:www\.)?vbox7\.com/play:(?P<id>[^/]+)'
_TEST = {
'url': 'http://vbox7.com/play:249bb972c2',
'md5': '99f65c0c9ef9b682b97313e052734c3f',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
redirect_page, urlh = self._download_webpage_handle(url, video_id)
new_location = self._search_regex(r'window\.location = \'(.*)\';',
import json
from .common import InfoExtractor
+)
+from ..utils import (
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
# VeeHD seems to send garbage on the first request.
# See https://github.com/rg3/youtube-dl/issues/2102
import json
from .common import InfoExtractor
+)
+from ..utils import (
int_or_none,
ExtractorError,
)
import xml.etree.ElementTree
from .common import InfoExtractor
+)
+from ..utils import (
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..compat import compat_urlparse
from .internetvideoarchive import InternetVideoArchiveIE
-from ..utils import compat_urlparse
class VideoDetectiveIE(InfoExtractor):
'ext': 'mp4',
'title': 'KICK-ASS 2',
'description': 'md5:65ba37ad619165afac7d432eaded6013',
},
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
og_video = self._og_search_video_url(webpage)
query = compat_urlparse.urlparse(og_video).query
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
+)
+from ..utils import (
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
url = 'http://videomega.tv/iframe.php?ref={0:}'.format(video_id)
webpage = self._download_webpage(url, video_id)
import json
from .common import InfoExtractor
+from ..compat import (
+ compat_str,
+ compat_urllib_parse,
+ compat_urllib_request,
+)
from ..utils import (
ExtractorError,
- compat_urllib_request,
- compat_urllib_parse,
- compat_str,
unescapeHTML,
unified_strdate,
class VKIE(InfoExtractor):
from __future__ import unicode_literals
import re
from .common import InfoExtractor
compat_urllib_parse,
compat_urllib_request,
)
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
fields = dict(re.findall(r'''(?x)<input\s+
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_str,
+)
from ..utils import (
int_or_none,
import re
from .common import InfoExtractor
compat_urllib_parse_urlparse,
+)
+from ..utils import (
ExtractorError,
parse_duration,
qualities,
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
ad_m = re.search(
r'''value="No.*?" onClick="location.href='([^"']+)'"''', webpage)
if ad_m:
import re
from .common import InfoExtractor
compat_parse_qs,
compat_urlparse,
+)
+from ..utils import (
determine_ext,
unified_strdate,
)
'title': mobj.group('title'),
'age_limit': int(mobj.group('age_limit')),
'url': url,
- 'ext': determine_ext(url),
'user_agent': 'mobile',
}
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..utils import ExtractorError, compat_urllib_request
+from ..compat import compat_urllib_request
+from ..utils import ExtractorError
class WistiaIE(InfoExtractor):
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
request = compat_urllib_request.Request(self._API_URL.format(video_id))
request.add_header('Referer', url) # Some videos require this.
from __future__ import unicode_literals
from .common import InfoExtractor
}
def _real_extract(self, url):
- m = re.match(self._VALID_URL, url)
- video_id = m.group('id')
-
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<h1[^>]*>(.*?)</h1>', webpage, 'title')
# encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
- # Get webpage content
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(r'flv_url=(.*?)&',
import json
from .common import InfoExtractor
+)
+from ..utils import (
parse_duration,
str_to_int,
)
class XTubeIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?(?P<url>xtube\.com/watch\.php\?v=(?P<videoid>[^/?&]+))'
+ _VALID_URL = r'https?://(?:www\.)?(?P<url>xtube\.com/watch\.php\?v=(?P<id>[^/?&]+))'
_TEST = {
'url': 'http://www.xtube.com/watch.php?v=kVTUy_G222_',
'md5': '092fbdd3cbe292c920ef6fc6a8a9cdab',
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('videoid')
+ video_id = mobj.group('id')
url = 'http://www.' + mobj.group('url')
req = compat_urllib_request.Request(url)
import re
from .common import InfoExtractor
+)
+from ..utils import (
)
class XVideosIE(InfoExtractor):
- _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
+ _VALID_URL = r'https?://(?:www\.)?xvideos\.com/video(?P<id>[0-9]+)(?:.*)'
_TEST = {
'url': 'http://www.xvideos.com/video4588838/biker_takes_his_girl',
'md5': '4b46ae6ea5e6e9086e714d883313c0c9',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
-
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- self.report_extraction(video_id)
-
mobj = re.search(r'<h1 class="inlineError">(.+?)</h1>', webpage)
if mobj:
raise ExtractorError('%s said: %s' % (self.IE_NAME, clean_html(mobj.group(1))), expected=True)
video_url = compat_urllib_parse.unquote(
self._search_regex(r'flv_url=(.+?)&', webpage, 'video URL'))
video_title = self._html_search_regex(
r'<title>(.*?)\s+-\s+XVID', webpage, 'title')
-
- # Extract video thumbnail
video_thumbnail = self._search_regex(
r'url_bigthumb=(.+?)&', webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'url': video_url,
- 'uploader': None,
- 'upload_date': None,
'title': video_title,
'ext': 'flv',
'thumbnail': video_thumbnail,
import re
from .common import InfoExtractor, SearchInfoExtractor
-from ..utils import (
- ExtractorError,
compat_urllib_parse,
compat_urlparse,
+)
+from ..utils import (
import json
from .common import InfoExtractor
-from ..utils import compat_urllib_parse
+from ..compat import compat_urllib_parse
class YnetIE(InfoExtractor):
import sys
from .common import InfoExtractor
compat_urllib_parse_urlparse,
compat_urllib_request,
+)
+from ..utils import (
ExtractorError,
unescapeHTML,
unified_strdate,
+from .compat import compat_str
ExtractorError,
struct_unpack,
)