gitweb @ CieloNegro.org - youtube-dl.git/commitdiff
[compat] Add compat_urllib_parse_urlencode and eliminate encode_dict
author Sergey M․ <dstftw@gmail.com>
Fri, 25 Mar 2016 19:46:57 +0000 (01:46 +0600)
committer Sergey M․ <dstftw@gmail.com>
Fri, 25 Mar 2016 19:46:57 +0000 (01:46 +0600)
The encode_dict functionality has been improved and moved directly into compat_urllib_parse_urlencode.
All occurrences of compat_urllib_parse.urlencode throughout the codebase have been replaced with compat_urllib_parse_urlencode.

Closes #8974

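For context, a minimal before/after sketch of the call-site change this commit makes throughout the extractors (login_form and its values are hypothetical; the import assumes a youtube-dl checkout on the import path):

    from youtube_dl.compat import compat_urllib_parse_urlencode

    # Hypothetical form data; non-ASCII unicode values previously had to go
    # through encode_dict() so that Python 2's urlencode would not raise
    # UnicodeEncodeError.
    login_form = {'username': u'user@example.com', 'password': u'p\u00e4ssword'}

    # Old pattern: compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8')
    # New pattern: the compat wrapper byte-encodes the query itself on Python 2.
    data = compat_urllib_parse_urlencode(login_form).encode('utf-8')
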
84 files changed:
youtube_dl/compat.py
youtube_dl/extractor/addanime.py
youtube_dl/extractor/animeondemand.py
youtube_dl/extractor/atresplayer.py
youtube_dl/extractor/bambuser.py
youtube_dl/extractor/camdemy.py
youtube_dl/extractor/ceskatelevize.py
youtube_dl/extractor/cloudy.py
youtube_dl/extractor/comedycentral.py
youtube_dl/extractor/common.py
youtube_dl/extractor/condenast.py
youtube_dl/extractor/crunchyroll.py
youtube_dl/extractor/daum.py
youtube_dl/extractor/dcn.py
youtube_dl/extractor/dramafever.py
youtube_dl/extractor/eroprofile.py
youtube_dl/extractor/fc2.py
youtube_dl/extractor/fivemin.py
youtube_dl/extractor/flickr.py
youtube_dl/extractor/funimation.py
youtube_dl/extractor/gdcvault.py
youtube_dl/extractor/hotnewhiphop.py
youtube_dl/extractor/hypem.py
youtube_dl/extractor/internetvideoarchive.py
youtube_dl/extractor/iqiyi.py
youtube_dl/extractor/ivideon.py
youtube_dl/extractor/kaltura.py
youtube_dl/extractor/laola1tv.py
youtube_dl/extractor/leeco.py
youtube_dl/extractor/lynda.py
youtube_dl/extractor/matchtv.py
youtube_dl/extractor/metacafe.py
youtube_dl/extractor/minhateca.py
youtube_dl/extractor/mitele.py
youtube_dl/extractor/moevideo.py
youtube_dl/extractor/moniker.py
youtube_dl/extractor/mooshare.py
youtube_dl/extractor/mtv.py
youtube_dl/extractor/muzu.py
youtube_dl/extractor/myvideo.py
youtube_dl/extractor/naver.py
youtube_dl/extractor/nba.py
youtube_dl/extractor/neteasemusic.py
youtube_dl/extractor/nextmovie.py
youtube_dl/extractor/nfb.py
youtube_dl/extractor/nhl.py
youtube_dl/extractor/nick.py
youtube_dl/extractor/niconico.py
youtube_dl/extractor/noco.py
youtube_dl/extractor/novamov.py
youtube_dl/extractor/npr.py
youtube_dl/extractor/ooyala.py
youtube_dl/extractor/patreon.py
youtube_dl/extractor/played.py
youtube_dl/extractor/playtvak.py
youtube_dl/extractor/pluralsight.py
youtube_dl/extractor/porn91.py
youtube_dl/extractor/primesharetv.py
youtube_dl/extractor/promptfile.py
youtube_dl/extractor/prosiebensat1.py
youtube_dl/extractor/shahid.py
youtube_dl/extractor/shared.py
youtube_dl/extractor/sharesix.py
youtube_dl/extractor/sina.py
youtube_dl/extractor/smotri.py
youtube_dl/extractor/sohu.py
youtube_dl/extractor/soundcloud.py
youtube_dl/extractor/streamcloud.py
youtube_dl/extractor/telecinco.py
youtube_dl/extractor/tubitv.py
youtube_dl/extractor/twitch.py
youtube_dl/extractor/udemy.py
youtube_dl/extractor/vbox7.py
youtube_dl/extractor/viddler.py
youtube_dl/extractor/vimeo.py
youtube_dl/extractor/vk.py
youtube_dl/extractor/vlive.py
youtube_dl/extractor/vodlocker.py
youtube_dl/extractor/xfileshare.py
youtube_dl/extractor/yahoo.py
youtube_dl/extractor/yandexmusic.py
youtube_dl/extractor/youku.py
youtube_dl/extractor/youtube.py
youtube_dl/utils.py

index dbb91a6ef19ecdc5b1ca396424dda33d8107b2b4..76b6b0e3838c65c2d5814d0206c18dfe713d6435 100644 (file)
@@ -169,6 +169,31 @@ except ImportError:  # Python 2
         string = string.replace('+', ' ')
         return compat_urllib_parse_unquote(string, encoding, errors)
 
+try:
+    from urllib.parse import urlencode as compat_urllib_parse_urlencode
+except ImportError:  # Python 2
+    # Python 2 will choke in urlencode on mixture of byte and unicode strings.
+    # Possible solutions are to either port it from python 3 with all
+    # the friends or manually ensure input query contains only byte strings.
+    # We will stick with latter thus recursively encoding the whole query.
+    def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
+        def encode_elem(e):
+            if isinstance(e, dict):
+                e = encode_dict(e)
+            elif isinstance(e, (list, tuple,)):
+                e = encode_list(e)
+            elif isinstance(e, compat_str):
+                e = e.encode(encoding)
+            return e
+
+        def encode_dict(d):
+            return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())
+
+        def encode_list(l):
+            return [encode_elem(e) for e in l]
+
+        return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
+
 try:
     from urllib.request import DataHandler as compat_urllib_request_DataHandler
 except ImportError:  # Python < 3.4
@@ -588,6 +613,7 @@ __all__ = [
     'compat_urllib_parse_unquote',
     'compat_urllib_parse_unquote_plus',
     'compat_urllib_parse_unquote_to_bytes',
+    'compat_urllib_parse_urlencode',
     'compat_urllib_parse_urlparse',
     'compat_urllib_request',
     'compat_urllib_request_DataHandler',
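The Python 2 fallback added above walks the query recursively: encode_elem() turns every compat_str (unicode) key or value into UTF-8 bytes, descending into dicts and lists, before handing the result to the stdlib urlencode. A minimal usage sketch with hypothetical values:

    from youtube_dl.compat import compat_urllib_parse_urlencode

    # Mixed byte/unicode input like this would make Python 2's plain urlencode
    # raise UnicodeEncodeError; here every unicode element is encoded first.
    query = compat_urllib_parse_urlencode({
        'vid': '12345',                 # byte string on Python 2: passed through
        'title': u'\u00e9pisode 1',     # unicode: encoded to UTF-8 before quoting
        'tags': [u'a', u'\u00e9'],      # sequence values are encoded element-wise
    }, doseq=1)
    # e.g. 'vid=12345&title=%C3%A9pisode+1&tags=a&tags=%C3%A9' (key order may vary)
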
index fb1cc02e11f38ba4961a66e418c1f66af403e853..55a9322a753829e90715a76bc91e06828c460531 100644 (file)
@@ -6,7 +6,7 @@ from .common import InfoExtractor
 from ..compat import (
     compat_HTTPError,
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
 )
 from ..utils import (
@@ -60,7 +60,7 @@ class AddAnimeIE(InfoExtractor):
             confirm_url = (
                 parsed_url.scheme + '://' + parsed_url.netloc +
                 action + '?' +
-                compat_urllib_parse.urlencode({
+                compat_urllib_parse_urlencode({
                     'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
             self._download_webpage(
                 confirm_url, video_id,
index 2cede55a7e497f16a49a03ad8476d96f1ae9b433..9b01e38f5fe8b5a80b2635061433cc214fb1b315 100644 (file)
@@ -9,7 +9,6 @@ from ..compat import (
 )
 from ..utils import (
     determine_ext,
-    encode_dict,
     extract_attributes,
     ExtractorError,
     sanitized_Request,
@@ -71,7 +70,7 @@ class AnimeOnDemandIE(InfoExtractor):
             post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
 
         request = sanitized_Request(
-            post_url, urlencode_postdata(encode_dict(login_form)))
+            post_url, urlencode_postdata(login_form))
         request.add_header('Referer', self._LOGIN_URL)
 
         response = self._download_webpage(
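Call sites like the one above can drop encode_dict() because urlencode_postdata() in youtube_dl/utils.py (also changed by this commit; its hunk is not shown in this excerpt) presumably now delegates to the new compat wrapper, roughly:

    from youtube_dl.compat import compat_urllib_parse_urlencode

    # Presumed shape of the updated helper in youtube_dl/utils.py:
    def urlencode_postdata(*args, **kargs):
        return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
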
index b8f9ae005fbb0ab41589258886b8342eba0ac288..f9568cb5b89ad554cdfc6b1c08b99788e840a20a 100644 (file)
@@ -8,7 +8,7 @@ import re
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     int_or_none,
@@ -86,7 +86,7 @@ class AtresPlayerIE(InfoExtractor):
         }
 
         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)
index da986e06350d047f9a70dc4f42bfeb27b04afd0e..1a2eef48dcbff701977103ae5d865debaa4906fa 100644 (file)
@@ -5,7 +5,7 @@ import itertools
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_str,
 )
 from ..utils import (
@@ -58,7 +58,7 @@ class BambuserIE(InfoExtractor):
         }
 
         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         request.add_header('Referer', self._LOGIN_URL)
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)
index dd4d96cecd82764aa8ae77203d5a92e43ffe3acc..6ffbeabd371fd6f80a9ead1d23762f760a13ba2f 100644 (file)
@@ -6,7 +6,7 @@ import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -139,7 +139,7 @@ class CamdemyFolderIE(InfoExtractor):
         parsed_url = list(compat_urlparse.urlparse(url))
         query = dict(compat_urlparse.parse_qsl(parsed_url[4]))
         query.update({'displayMode': 'list'})
-        parsed_url[4] = compat_urllib_parse.urlencode(query)
+        parsed_url[4] = compat_urllib_parse_urlencode(query)
         final_url = compat_urlparse.urlunparse(parsed_url)
 
         page = self._download_webpage(final_url, folder_id)
index b355111cbef2306805b4a3c445b87a275b4697ab..d93108df5425b1a2f3296c22e7e8ced0438c9a00 100644 (file)
@@ -5,8 +5,8 @@ import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
 )
 from ..utils import (
@@ -102,7 +102,7 @@ class CeskaTelevizeIE(InfoExtractor):
 
         req = sanitized_Request(
             'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
-            data=compat_urllib_parse.urlencode(data))
+            data=compat_urllib_parse_urlencode(data))
 
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
         req.add_header('x-addr', '127.0.0.1')
index 0fa720ee8745cfc728b4413b41888e17787fb5db..9e267e6c0260e0391ff04b61c613a2fb6d916313 100644 (file)
@@ -6,7 +6,7 @@ import re
 from .common import InfoExtractor
 from ..compat import (
     compat_parse_qs,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_HTTPError,
 )
 from ..utils import (
@@ -64,7 +64,7 @@ class CloudyIE(InfoExtractor):
                 'errorUrl': error_url,
             })
 
-        data_url = self._API_URL % (video_host, compat_urllib_parse.urlencode(form))
+        data_url = self._API_URL % (video_host, compat_urllib_parse_urlencode(form))
         player_data = self._download_webpage(
             data_url, video_id, 'Downloading player data')
         data = compat_parse_qs(player_data)
index 5b1b99675c760a7249bcdb23ff3072af86710114..0c59102e072594857cc0f1c53e15c183b1885a93 100644 (file)
@@ -5,7 +5,7 @@ import re
 from .mtv import MTVServicesInfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,
@@ -201,7 +201,7 @@ class ComedyCentralShowsIE(MTVServicesInfoExtractor):
         # Correct cc.com in uri
         uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.com', uri)
 
-        index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
+        index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse_urlencode({'uri': uri}))
         idoc = self._download_xml(
             index_url, epTitle,
             'Downloading show index', 'Unable to download episode index')
index 770105a5b58013bbcf76e342280b76827724dae4..b412fd030be3e5546ba90966e9bcf0bc54b05ba2 100644 (file)
@@ -21,7 +21,7 @@ from ..compat import (
     compat_os_name,
     compat_str,
     compat_urllib_error,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -1300,7 +1300,7 @@ class InfoExtractor(object):
                         'plugin': 'flowplayer-3.2.0.1',
                     }
                 f4m_url += '&' if '?' in f4m_url else '?'
-                f4m_url += compat_urllib_parse.urlencode(f4m_params)
+                f4m_url += compat_urllib_parse_urlencode(f4m_params)
                 formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
                 continue
 
index 054978ff23b367484c32c2142906c27c28379b38..e8f2b5a07591410c16fe6fe096678a12006abe48 100644 (file)
@@ -5,7 +5,7 @@ import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
     compat_urlparse,
 )
@@ -97,7 +97,7 @@ class CondeNastIE(InfoExtractor):
         video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id')
         player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, 'player id')
         target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, 'target')
-        data = compat_urllib_parse.urlencode({'videoId': video_id,
+        data = compat_urllib_parse_urlencode({'videoId': video_id,
                                               'playerId': player_id,
                                               'target': target,
                                               })
index 85fa7a725618c834be36070b38f4ae1c0eb79add..7746f1be3198186865cd676dc7682defdb78a395 100644 (file)
@@ -11,8 +11,8 @@ from math import pow, sqrt, floor
 from .common import InfoExtractor
 from ..compat import (
     compat_etree_fromstring,
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
     compat_urllib_request,
     compat_urlparse,
 )
@@ -78,7 +78,7 @@ class CrunchyrollBaseIE(InfoExtractor):
         # See https://github.com/rg3/youtube-dl/issues/7202.
         qs['skip_wall'] = ['1']
         return compat_urlparse.urlunparse(
-            parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
+            parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
 
 
 class CrunchyrollIE(CrunchyrollBaseIE):
@@ -308,7 +308,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
 
         playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
         playerdata_req = sanitized_Request(playerdata_url)
-        playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
+        playerdata_req.data = compat_urllib_parse_urlencode({'current_page': webpage_url})
         playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
         playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')
 
@@ -322,7 +322,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             streamdata_req = sanitized_Request(
                 'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s'
                 % (stream_id, stream_format, stream_quality),
-                compat_urllib_parse.urlencode({'current_page': url}).encode('utf-8'))
+                compat_urllib_parse_urlencode({'current_page': url}).encode('utf-8'))
             streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             streamdata = self._download_xml(
                 streamdata_req, video_id,
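In hunks like the CrunchyrollBaseIE one above, the second positional argument passed to compat_urllib_parse_urlencode is doseq: parse_qs returns list values, and doseq=True expands each list element into its own key=value pair when the query string is rebuilt. A small round-trip sketch with a made-up query string:

    from youtube_dl.compat import compat_parse_qs, compat_urllib_parse_urlencode

    qs = compat_parse_qs('media=12345&skip_wall=0')   # {'media': ['12345'], 'skip_wall': ['0']}
    qs['skip_wall'] = ['1']
    query = compat_urllib_parse_urlencode(qs, True)   # e.g. 'media=12345&skip_wall=1'
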
index c84c5105886c73f613ef3adae1a7e1831be83f1d..86024a745661dda2da9d3fb883ccf4db017a722c 100644 (file)
@@ -8,8 +8,8 @@ import itertools
 from .common import InfoExtractor
 from ..compat import (
     compat_parse_qs,
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -70,7 +70,7 @@ class DaumIE(InfoExtractor):
 
     def _real_extract(self, url):
         video_id = compat_urllib_parse_unquote(self._match_id(url))
-        query = compat_urllib_parse.urlencode({'vid': video_id})
+        query = compat_urllib_parse_urlencode({'vid': video_id})
         movie_data = self._download_json(
             'http://videofarm.daum.net/controller/api/closed/v1_2/IntegratedMovieData.json?' + query,
             video_id, 'Downloading video formats info')
@@ -86,7 +86,7 @@ class DaumIE(InfoExtractor):
         formats = []
         for format_el in movie_data['output_list']['output_list']:
             profile = format_el['profile']
-            format_query = compat_urllib_parse.urlencode({
+            format_query = compat_urllib_parse_urlencode({
                 'vid': video_id,
                 'profile': profile,
             })
index 15a1c40f7a07c36136a0142148af3f7f44f49733..982ed94ea403f9ab98c4d505cff575be180e3cb0 100644 (file)
@@ -6,7 +6,7 @@ import base64
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_str,
 )
 from ..utils import (
@@ -106,7 +106,7 @@ class DCNVideoIE(DCNBaseIE):
 
         webpage = self._download_webpage(
             'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' +
-            compat_urllib_parse.urlencode({
+            compat_urllib_parse_urlencode({
                 'id': video_data['id'],
                 'user_id': video_data['user_id'],
                 'signature': video_data['signature'],
@@ -133,7 +133,7 @@ class DCNLiveIE(DCNBaseIE):
 
         webpage = self._download_webpage(
             'http://admin.mangomolo.com/analytics/index.php/customers/embed/index?' +
-            compat_urllib_parse.urlencode({
+            compat_urllib_parse_urlencode({
                 'id': base64.b64encode(channel_data['user_id'].encode()).decode(),
                 'channelid': base64.b64encode(channel_data['id'].encode()).decode(),
                 'signature': channel_data['signature'],
@@ -174,7 +174,7 @@ class DCNSeasonIE(InfoExtractor):
         data['show_id'] = show_id
         request = sanitized_Request(
             'http://admin.mangomolo.com/analytics/index.php/plus/show',
-            compat_urllib_parse.urlencode(data),
+            compat_urllib_parse_urlencode(data),
             {
                 'Origin': 'http://www.dcndigital.ae',
                 'Content-Type': 'application/x-www-form-urlencoded'
index d35e88881d859555d20914240fff733508a4ac42..2101acaafd94919e246ecd3ae8b9fdd559816a7d 100644 (file)
@@ -6,7 +6,7 @@ import itertools
 from .amp import AMPIE
 from ..compat import (
     compat_HTTPError,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -50,7 +50,7 @@ class DramaFeverBaseIE(AMPIE):
         }
 
         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)
 
index 7fcd0151d8efdfd2b2378c0097b363e563e3171b..297f8a6f5fa4371415554bfe6c44d0745c262491 100644 (file)
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     unescapeHTML
@@ -43,7 +43,7 @@ class EroProfileIE(InfoExtractor):
         if username is None:
             return
 
-        query = compat_urllib_parse.urlencode({
+        query = compat_urllib_parse_urlencode({
             'username': username,
             'password': password,
             'url': 'http://www.eroprofile.com/',
index 508684d2eec8786c83ce3dce8cb4e8f85fa56673..cacf61973082e01057b13fdb9174692e625d0d5b 100644 (file)
@@ -5,12 +5,11 @@ import hashlib
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_request,
     compat_urlparse,
 )
 from ..utils import (
-    encode_dict,
     ExtractorError,
     sanitized_Request,
 )
@@ -57,7 +56,7 @@ class FC2IE(InfoExtractor):
             'Submit': ' Login ',
         }
 
-        login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
+        login_data = compat_urllib_parse_urlencode(login_form_strs).encode('utf-8')
         request = sanitized_Request(
             'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)
 
index 67d50a386ce812018047f711205b8619d75c1bf8..6b834541636533d808ce396ae456f980f989c731 100644 (file)
@@ -4,8 +4,8 @@ import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
     compat_parse_qs,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
     compat_urlparse,
 )
@@ -109,7 +109,7 @@ class FiveMinIE(InfoExtractor):
 
         response = self._download_json(
             'https://syn.5min.com/handlers/SenseHandler.ashx?' +
-            compat_urllib_parse.urlencode({
+            compat_urllib_parse_urlencode({
                 'func': 'GetResults',
                 'playlist': video_id,
                 'sid': sid,
index 18f439df978b59e8d48454300498e1de337c3ad4..0a3de14988dc06e92a7a27e52c4c7838caf69b2b 100644 (file)
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -42,7 +42,7 @@ class FlickrIE(InfoExtractor):
         }
         if secret:
             query['secret'] = secret
-        data = self._download_json(self._API_BASE_URL + compat_urllib_parse.urlencode(query), video_id, note)
+        data = self._download_json(self._API_BASE_URL + compat_urllib_parse_urlencode(query), video_id, note)
         if data['stat'] != 'ok':
             raise ExtractorError(data['message'])
         return data
index 0f37ed7863c93da4813a6ac6be2d6402fc7bbd0a..1eb528f31f4b908b8d832cfe1fd4e1647ef74058 100644 (file)
@@ -5,7 +5,6 @@ from .common import InfoExtractor
 from ..utils import (
     clean_html,
     determine_ext,
-    encode_dict,
     int_or_none,
     sanitized_Request,
     ExtractorError,
@@ -54,10 +53,10 @@ class FunimationIE(InfoExtractor):
         (username, password) = self._get_login_info()
         if username is None:
             return
-        data = urlencode_postdata(encode_dict({
+        data = urlencode_postdata({
             'email_field': username,
             'password_field': password,
-        }))
+        })
         login_request = sanitized_Request('http://www.funimation.com/login', data, headers={
             'User-Agent': 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0',
             'Content-Type': 'application/x-www-form-urlencoded'
index 3befd3e7b8f9ffeb48a31a284b5971b3a8a5cfab..cc8fa45d29e2bc79807d1101aecdec05aa3466d1 100644 (file)
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     remove_end,
     HEADRequest,
@@ -123,7 +123,7 @@ class GDCVaultIE(InfoExtractor):
             'password': password,
         }
 
-        request = sanitized_Request(login_url, compat_urllib_parse.urlencode(login_form))
+        request = sanitized_Request(login_url, compat_urllib_parse_urlencode(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self._download_webpage(request, display_id, 'Logging in')
         start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page')
index efc3e8429956b38016e3b9a6c7d84cbaed367ff8..152d2a98a1ecb4bd8059631355adf72e1d4d1c19 100644 (file)
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
 import base64
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     HEADRequest,
@@ -35,7 +35,7 @@ class HotNewHipHopIE(InfoExtractor):
                 r'"contentUrl" content="(.*?)"', webpage, 'content URL')
             return self.url_result(video_url, ie='Youtube')
 
-        reqdata = compat_urllib_parse.urlencode([
+        reqdata = compat_urllib_parse_urlencode([
             ('mediaType', 's'),
             ('mediaId', video_id),
         ])
index e0ab318022ba4291771d2e9d146566e4f0437daf..f7c9130540e51a75a83052704d61403b488b25f6 100644 (file)
@@ -4,7 +4,7 @@ import json
 import time
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     sanitized_Request,
@@ -28,7 +28,7 @@ class HypemIE(InfoExtractor):
         track_id = self._match_id(url)
 
         data = {'ax': 1, 'ts': time.time()}
-        request = sanitized_Request(url + '?' + compat_urllib_parse.urlencode(data))
+        request = sanitized_Request(url + '?' + compat_urllib_parse_urlencode(data))
         response, urlh = self._download_webpage_handle(
             request, track_id, 'Downloading webpage with the url')
 
index 483cc6f9e62da3bc272ba66efc540b95c17116e7..e60145b3dc5dc80f921c86a3b03a59cf5844b60e 100644 (file)
@@ -5,7 +5,7 @@ import re
 from .common import InfoExtractor
 from ..compat import (
     compat_urlparse,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     xpath_with_ns,
@@ -38,7 +38,7 @@ class InternetVideoArchiveIE(InfoExtractor):
         # Other player ids return m3u8 urls
         cleaned_dic['playerid'] = '247'
         cleaned_dic['videokbrate'] = '100000'
-        return compat_urllib_parse.urlencode(cleaned_dic)
+        return compat_urllib_parse_urlencode(cleaned_dic)
 
     def _real_extract(self, url):
         query = compat_urlparse.urlparse(url).query
index ffcea30ad7c4aff6b9a1df97cd70f9524b23f3c5..9e8c9432a6947ad2ad1866e257e577c98c3ac38b 100644 (file)
@@ -14,7 +14,7 @@ from .common import InfoExtractor
 from ..compat import (
     compat_parse_qs,
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
 )
 from ..utils import (
@@ -322,7 +322,7 @@ class IqiyiIE(InfoExtractor):
             'bird_t': timestamp,
         }
         validation_result = self._download_json(
-            'http://kylin.iqiyi.com/validate?' + compat_urllib_parse.urlencode(validation_params), None,
+            'http://kylin.iqiyi.com/validate?' + compat_urllib_parse_urlencode(validation_params), None,
             note='Validate credentials', errnote='Unable to validate credentials')
 
         MSG_MAP = {
@@ -456,7 +456,7 @@ class IqiyiIE(InfoExtractor):
                         'QY00001': auth_result['data']['u'],
                     })
                 api_video_url += '?' if '?' not in api_video_url else '&'
-                api_video_url += compat_urllib_parse.urlencode(param)
+                api_video_url += compat_urllib_parse_urlencode(param)
                 js = self._download_json(
                     api_video_url, video_id,
                     note='Download video info of segment %d for format %s' % (segment_index + 1, format_id))
@@ -494,7 +494,7 @@ class IqiyiIE(InfoExtractor):
         }
 
         api_url = 'http://cache.video.qiyi.com/vms' + '?' + \
-            compat_urllib_parse.urlencode(param)
+            compat_urllib_parse_urlencode(param)
         raw_data = self._download_json(api_url, video_id)
         return raw_data
 
index 617dc8c071d0ab983a74a95184a47ee5a6f82525..3ca824f7984f3cac8615644c5babbcacb4e5c4a3 100644 (file)
@@ -5,7 +5,7 @@ import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import qualities
@@ -62,7 +62,7 @@ class IvideonIE(InfoExtractor):
         quality = qualities(self._QUALITIES)
 
         formats = [{
-            'url': 'https://streaming.ivideon.com/flv/live?%s' % compat_urllib_parse.urlencode({
+            'url': 'https://streaming.ivideon.com/flv/live?%s' % compat_urllib_parse_urlencode({
                 'server': server_id,
                 'camera': camera_id,
                 'sessionId': 'demo',
index 44d7c84a13f9bef9aa1d68dc5d38fe81b0af4a5f..a65697ff558864f36cc5e8b8f82f959b19ea16fc 100644 (file)
@@ -6,7 +6,7 @@ import base64
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
     compat_parse_qs,
 )
@@ -71,7 +71,7 @@ class KalturaIE(InfoExtractor):
                 for k, v in a.items():
                     params['%d:%s' % (i, k)] = v
 
-        query = compat_urllib_parse.urlencode(params)
+        query = compat_urllib_parse_urlencode(params)
         url = self._API_BASE + query
         data = self._download_json(url, video_id, *args, **kwargs)
 
index 41d80bc12e69aa8ef4d54ff71ca667754d6b2409..d9dc067d2bb08555bd0f0078c20952744827e463 100644 (file)
@@ -5,7 +5,7 @@ import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -90,7 +90,7 @@ class Laola1TvIE(InfoExtractor):
 
         hd_doc = self._download_xml(
             'http://www.laola1.tv/server/hd_video.php?%s'
-            % compat_urllib_parse.urlencode({
+            % compat_urllib_parse_urlencode({
                 'play': video_id,
                 'partner': partner_id,
                 'portal': portal,
@@ -108,7 +108,7 @@ class Laola1TvIE(InfoExtractor):
 
         req = sanitized_Request(
             'https://club.laola1.tv/sp/laola1/api/v3/user/session/premium/player/stream-access?%s' %
-            compat_urllib_parse.urlencode({
+            compat_urllib_parse_urlencode({
                 'videoId': video_id,
                 'target': VS_TARGETS.get(kind, '2'),
                 'label': _v('label'),
index 462b752dd9b85af0f845f5677bc3128eb6780590..375fdaed129421371f8575c3aeceb71ed4712de7 100644 (file)
@@ -11,7 +11,7 @@ from .common import InfoExtractor
 from ..compat import (
     compat_ord,
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     determine_ext,
@@ -122,7 +122,7 @@ class LeIE(InfoExtractor):
             'domain': 'www.le.com'
         }
         play_json_req = sanitized_Request(
-            'http://api.le.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params)
+            'http://api.le.com/mms/out/video/playJson?' + compat_urllib_parse_urlencode(params)
         )
         cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
         if cn_verification_proxy:
@@ -151,7 +151,7 @@ class LeIE(InfoExtractor):
         for format_id in formats:
             if format_id in dispatch:
                 media_url = playurl['domain'][0] + dispatch[format_id][0]
-                media_url += '&' + compat_urllib_parse.urlencode({
+                media_url += '&' + compat_urllib_parse_urlencode({
                     'm3v': 1,
                     'format': 1,
                     'expect': 3,
@@ -305,7 +305,7 @@ class LetvCloudIE(InfoExtractor):
             }
             self.sign_data(data)
             return self._download_json(
-                'http://api.letvcloud.com/gpc.php?' + compat_urllib_parse.urlencode(data),
+                'http://api.letvcloud.com/gpc.php?' + compat_urllib_parse_urlencode(data),
                 media_id, 'Downloading playJson data for type %s' % cf)
 
         play_json = get_play_json(cf, time.time())
index d4e1ae99d4d91c887cfc2d55c29d8211fc48a80f..df50cb655365223dd6335c9772dca9a6bbad7091 100644 (file)
@@ -6,7 +6,7 @@ import json
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,
@@ -36,7 +36,7 @@ class LyndaBaseIE(InfoExtractor):
             'stayPut': 'false'
         }
         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         login_page = self._download_webpage(
             request, None, 'Logging in as %s' % username)
 
@@ -65,7 +65,7 @@ class LyndaBaseIE(InfoExtractor):
                     'stayPut': 'false',
                 }
                 request = sanitized_Request(
-                    self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form).encode('utf-8'))
+                    self._LOGIN_URL, compat_urllib_parse_urlencode(confirm_form).encode('utf-8'))
                 login_page = self._download_webpage(
                     request, None,
                     'Confirming log in and log out from another device')
index 28e0dfe63348082f65df20463ab8a2001d8592fa..e33bfde3b6c2540cfc8224b61afc78f7800dafb9 100644 (file)
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
 import random
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     sanitized_Request,
     xpath_text,
@@ -29,7 +29,7 @@ class MatchTVIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = 'matchtv-live'
         request = sanitized_Request(
-            'http://player.matchtv.ntvplus.tv/player/smil?%s' % compat_urllib_parse.urlencode({
+            'http://player.matchtv.ntvplus.tv/player/smil?%s' % compat_urllib_parse_urlencode({
                 'ts': '',
                 'quality': 'SD',
                 'contentId': '561d2c0df7159b37178b4567',
index c31e8798ae7ccdbaa8ef9baa481336ef8a5e8dd9..0e486544616583a4c5a8ed6cce96264152dbfcae 100644 (file)
@@ -5,8 +5,8 @@ import re
 from .common import InfoExtractor
 from ..compat import (
     compat_parse_qs,
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     determine_ext,
@@ -117,7 +117,7 @@ class MetacafeIE(InfoExtractor):
             'filters': '0',
             'submit': "Continue - I'm over 18",
         }
-        request = sanitized_Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
+        request = sanitized_Request(self._FILTER_POST, compat_urllib_parse_urlencode(disclaimer_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self.report_age_confirmation()
         self._download_webpage(request, None, False, 'Unable to confirm age')
index e46b23a6f73990e14e73d911cb2e8253fba08a85..6ec53c3036ef044eb7761719c95db4ed982dea37 100644 (file)
@@ -2,7 +2,7 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     int_or_none,
     parse_duration,
@@ -39,7 +39,7 @@ class MinhatecaIE(InfoExtractor):
         ]
         req = sanitized_Request(
             'http://minhateca.com.br/action/License/Download',
-            data=compat_urllib_parse.urlencode(token_data))
+            data=compat_urllib_parse_urlencode(token_data))
         req.add_header('Content-Type', 'application/x-www-form-urlencoded')
         data = self._download_json(
             req, video_id, note='Downloading metadata')
index 9e584860a2e4ac98596093b701910c0060d978b9..76ced7928e856ba6bd3ddc322b189e3be2162f04 100644 (file)
@@ -2,11 +2,10 @@ from __future__ import unicode_literals
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
-    encode_dict,
     get_element_by_attribute,
     int_or_none,
 )
@@ -60,7 +59,7 @@ class MiTeleIE(InfoExtractor):
                 'sta': '0',
             }
             media = self._download_json(
-                '%s/?%s' % (gat, compat_urllib_parse.urlencode(encode_dict(token_data))),
+                '%s/?%s' % (gat, compat_urllib_parse_urlencode(token_data)),
                 display_id, 'Downloading %s JSON' % location['loc'])
             file_ = media.get('file')
             if not file_:
index d930b96343bd081f029ad1bb7ce1ceee3c241f59..89cdd46000eb489f5f249eadeefc2b877f6b7fcb 100644 (file)
@@ -5,7 +5,7 @@ import json
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -77,7 +77,7 @@ class MoeVideoIE(InfoExtractor):
             ],
         ]
         r_json = json.dumps(r)
-        post = compat_urllib_parse.urlencode({'r': r_json})
+        post = compat_urllib_parse_urlencode({'r': r_json})
         req = sanitized_Request(self._API_URL, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
 
index f6bf94f2f6b3d6868fa098994a2d275f8bb59a99..c5ce693f15e55278883b576b8cb3f9095a63f223 100644 (file)
@@ -5,7 +5,7 @@ import os.path
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     remove_start,
@@ -88,7 +88,7 @@ class MonikerIE(InfoExtractor):
             fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
             data = dict(fields)
 
-            post = compat_urllib_parse.urlencode(data)
+            post = compat_urllib_parse_urlencode(data)
             headers = {
                 b'Content-Type': b'application/x-www-form-urlencoded',
             }
index f010f52d50b40685000c8eab0e20b1373badcc9d..ee3947f43807055c577641e439096f61981c7575 100644 (file)
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     sanitized_Request,
@@ -58,7 +58,7 @@ class MooshareIE(InfoExtractor):
         }
 
         request = sanitized_Request(
-            'http://mooshare.biz/%s' % video_id, compat_urllib_parse.urlencode(download_form))
+            'http://mooshare.biz/%s' % video_id, compat_urllib_parse_urlencode(download_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
 
         self._sleep(5, video_id)
index 824bbcb4e6d58d3856e521c6de055346164dd1db..640ee3d9339c48e2b3fef0ade15ee8ebcae8b292 100644 (file)
@@ -4,7 +4,7 @@ import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_str,
 )
 from ..utils import (
@@ -171,7 +171,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
         data = {'uri': uri}
         if self._LANG:
             data['lang'] = self._LANG
-        return compat_urllib_parse.urlencode(data)
+        return compat_urllib_parse_urlencode(data)
 
     def _get_videos_info(self, uri):
         video_id = self._id_from_uri(uri)
index 1e9cf8de9174e086dd7c19525a7dc94025075683..cbc800481bc16528883a6be58357a38dcbd2c195 100644 (file)
@@ -1,9 +1,7 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-)
+from ..compat import compat_urllib_parse_urlencode
 
 
 class MuzuTVIE(InfoExtractor):
@@ -25,7 +23,7 @@ class MuzuTVIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        info_data = compat_urllib_parse.urlencode({
+        info_data = compat_urllib_parse_urlencode({
             'format': 'json',
             'url': url,
         })
@@ -41,7 +39,7 @@ class MuzuTVIE(InfoExtractor):
             if video_info.get('v%s' % quality):
                 break
 
-        data = compat_urllib_parse.urlencode({
+        data = compat_urllib_parse_urlencode({
             'ai': video_id,
             # Even if each time you watch a video the hash changes,
             # it seems to work for different videos, and it will work
index c83a1eab5ba6b90229422bce078c9ae170fa5069..6d447a4935e49cd3c4f7525fff6ffe5e9883656e 100644 (file)
@@ -9,8 +9,8 @@ import json
 from .common import InfoExtractor
 from ..compat import (
     compat_ord,
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,
@@ -112,7 +112,7 @@ class MyVideoIE(InfoExtractor):
                 encxml = compat_urllib_parse_unquote(b)
         if not params.get('domain'):
             params['domain'] = 'www.myvideo.de'
-        xmldata_url = '%s?%s' % (encxml, compat_urllib_parse.urlencode(params))
+        xmldata_url = '%s?%s' % (encxml, compat_urllib_parse_urlencode(params))
         if 'flash_playertype=MTV' in xmldata_url:
             self._downloader.report_warning('avoiding MTV player')
             xmldata_url = (
index 1f5fc21452322792d0a94bc75feec84d6c826858..6d6f69b440a4b91d95c42210b2e597aca99144f6 100644 (file)
@@ -5,7 +5,7 @@ import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -53,8 +53,8 @@ class NaverIE(InfoExtractor):
             raise ExtractorError('couldn\'t extract vid and key')
         vid = m_id.group(1)
         key = m_id.group(2)
-        query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key, })
-        query_urls = compat_urllib_parse.urlencode({
+        query = compat_urllib_parse_urlencode({'vid': vid, 'inKey': key, })
+        query_urls = compat_urllib_parse_urlencode({
             'masterVid': vid,
             'protocol': 'p2p',
             'inKey': key,
index 3e2b3e59945f4a67275ca6bc9d3c9193f531506c..d896b0d04810655c1d7c993819b88e7b32029832 100644 (file)
@@ -6,7 +6,7 @@ import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -97,7 +97,7 @@ class NBAIE(InfoExtractor):
     _PAGE_SIZE = 30
 
     def _fetch_page(self, team, video_id, page):
-        search_url = 'http://searchapp2.nba.com/nba-search/query.jsp?' + compat_urllib_parse.urlencode({
+        search_url = 'http://searchapp2.nba.com/nba-search/query.jsp?' + compat_urllib_parse_urlencode({
             'type': 'teamvideo',
             'start': page * self._PAGE_SIZE + 1,
             'npp': (page + 1) * self._PAGE_SIZE + 1,
index 7830616f8fb9498c058504d52c199c6a69a9d98b..0d36474fa069b793ff32e90e6de1804de09058ac 100644 (file)
@@ -8,7 +8,7 @@ import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_str,
     compat_itertools_count,
 )
@@ -153,7 +153,7 @@ class NetEaseMusicIE(NetEaseMusicBaseIE):
             'ids': '[%s]' % song_id
         }
         info = self.query_api(
-            'song/detail?' + compat_urllib_parse.urlencode(params),
+            'song/detail?' + compat_urllib_parse_urlencode(params),
             song_id, 'Downloading song info')['songs'][0]
 
         formats = self.extract_formats(info)
index 657ae77a0112328361433a9eb46b7daeb82ed8c6..9ccd7d774f9df3084a0429f2ff27ef2e8c866d6f 100644 (file)
@@ -2,7 +2,7 @@
 from __future__ import unicode_literals
 
 from .mtv import MTVServicesInfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 
 
 class NextMovieIE(MTVServicesInfoExtractor):
@@ -20,7 +20,7 @@ class NextMovieIE(MTVServicesInfoExtractor):
     }]
 
     def _get_feed_query(self, uri):
-        return compat_urllib_parse.urlencode({
+        return compat_urllib_parse_urlencode({
             'feed': '1505',
             'mgid': uri,
         })
index 5bd15f7a72f5aeb49d91391e11ddffaa1a52f44f..ba1eefafcae131f031685d97f84521858cdf89ba 100644 (file)
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import sanitized_Request
 
 
@@ -40,7 +40,7 @@ class NFBIE(InfoExtractor):
 
         request = sanitized_Request(
             'https://www.nfb.ca/film/%s/player_config' % video_id,
-            compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
+            compat_urllib_parse_urlencode({'getConfig': 'true'}).encode('ascii'))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf')
 
index 8d5ce46ad6bd2c5bdac2aff9998e8f278fca6376..c1dea8b6c2a32da6728dee430d1167c04b1747f1 100644 (file)
@@ -7,7 +7,7 @@ import os
 from .common import InfoExtractor
 from ..compat import (
     compat_urlparse,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse
 )
 from ..utils import (
@@ -38,7 +38,7 @@ class NHLBaseInfoExtractor(InfoExtractor):
             parsed_url = compat_urllib_parse_urlparse(initial_video_url)
             filename, ext = os.path.splitext(parsed_url.path)
             path = '%s_sd%s' % (filename, ext)
-            data = compat_urllib_parse.urlencode({
+            data = compat_urllib_parse_urlencode({
                 'type': 'fvod',
                 'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + parsed_url[3:])
             })
@@ -211,7 +211,7 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):
             r'tab0"[^>]*?>(.*?)</td>',
             webpage, 'playlist title', flags=re.DOTALL).lower().capitalize()
 
-        data = compat_urllib_parse.urlencode({
+        data = compat_urllib_parse_urlencode({
             'cid': cat_id,
             # This is the default value
             'count': 12,
index b62819ae529f2731e29d91d1071077af4fd03815..ce065f2b086adbca9c551afeb0d2437a59248d88 100644 (file)
@@ -2,7 +2,7 @@
 from __future__ import unicode_literals
 
 from .mtv import MTVServicesInfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 
 
 class NickIE(MTVServicesInfoExtractor):
@@ -54,7 +54,7 @@ class NickIE(MTVServicesInfoExtractor):
     }]
 
     def _get_feed_query(self, uri):
-        return compat_urllib_parse.urlencode({
+        return compat_urllib_parse_urlencode({
             'feed': 'nick_arc_player_prime',
             'mgid': uri,
         })
index 586e52a4a4f49151c55ed99baf9cadd136863bbd..688f0a124629f0aa15ae2057e25ebca31c461557 100644 (file)
@@ -7,11 +7,10 @@ import datetime
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
-    encode_dict,
     ExtractorError,
     int_or_none,
     parse_duration,
@@ -101,7 +100,7 @@ class NiconicoIE(InfoExtractor):
             'mail': username,
             'password': password,
         }
-        login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
+        login_data = compat_urllib_parse_urlencode(login_form_strs).encode('utf-8')
         request = sanitized_Request(
             'https://secure.nicovideo.jp/secure/login', login_data)
         login_results = self._download_webpage(
@@ -141,7 +140,7 @@ class NiconicoIE(InfoExtractor):
                 r'\'thumbPlayKey\'\s*:\s*\'(.*?)\'', ext_player_info, 'thumbPlayKey')
 
             # Get flv info
-            flv_info_data = compat_urllib_parse.urlencode({
+            flv_info_data = compat_urllib_parse_urlencode({
                 'k': thumb_play_key,
                 'v': video_id
             })
index ec7317a2f6d16911562285871167786e31902c62..8f4b69a6fd646ed52debc3de2f04b573869870d0 100644 (file)
@@ -8,7 +8,7 @@ import hashlib
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -75,7 +75,7 @@ class NocoIE(InfoExtractor):
             'username': username,
             'password': password,
         }
-        request = sanitized_Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+        request = sanitized_Request(self._LOGIN_URL, compat_urllib_parse_urlencode(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')
 
         login = self._download_json(request, None, 'Logging in as %s' % username)
index d68c1ad7923ac56c6bef68343a20df24e4aa5ffb..a131f7dbdd95f5cbd39add52cf3a721068085e78 100644 (file)
@@ -7,7 +7,6 @@ from ..compat import compat_urlparse
 from ..utils import (
     ExtractorError,
     NO_DEFAULT,
-    encode_dict,
     sanitized_Request,
     urlencode_postdata,
 )
@@ -73,7 +72,7 @@ class NovaMovIE(InfoExtractor):
             if not post_url.startswith('http'):
                 post_url = compat_urlparse.urljoin(url, post_url)
             request = sanitized_Request(
-                post_url, urlencode_postdata(encode_dict(fields)))
+                post_url, urlencode_postdata(fields))
             request.add_header('Content-Type', 'application/x-www-form-urlencoded')
             request.add_header('Referer', post_url)
             webpage = self._download_webpage(
index a3f0abb4eda4afdfb7afb5e6dec168874431e716..1777aa10b537a8359b8acb4a5b19f5d169cb00d6 100644 (file)
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     int_or_none,
     qualities,
@@ -38,7 +38,7 @@ class NprIE(InfoExtractor):
         playlist_id = self._match_id(url)
 
         config = self._download_json(
-            'http://api.npr.org/query?%s' % compat_urllib_parse.urlencode({
+            'http://api.npr.org/query?%s' % compat_urllib_parse_urlencode({
                 'id': playlist_id,
                 'fields': 'titles,audio,show',
                 'format': 'json',
index 20b984288ac7bba05bef7d4a34d5f01b5cb0f851..16f040191aa31bd9e8dd49b37a42085c2b340582 100644 (file)
@@ -9,7 +9,7 @@ from ..utils import (
     ExtractorError,
     unsmuggle_url,
 )
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 
 
 class OoyalaBaseIE(InfoExtractor):
@@ -35,7 +35,7 @@ class OoyalaBaseIE(InfoExtractor):
         for supported_format in ('mp4', 'm3u8', 'hds', 'rtmp'):
             auth_data = self._download_json(
                 self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code) +
-                compat_urllib_parse.urlencode({
+                compat_urllib_parse_urlencode({
                     'domain': domain,
                     'supportedFormats': supported_format
                 }),
index ec8876c28551af6e717ac49cbd22da46d096c67c..22975066516a0d37e74c9c520dd4faf0a68305a6 100644 (file)
@@ -65,7 +65,7 @@ class PatreonIE(InfoExtractor):
 
         request = sanitized_Request(
             'https://www.patreon.com/processLogin',
-            compat_urllib_parse.urlencode(login_form).encode('utf-8')
+            compat_urllib_parse_urlencode(login_form).encode('utf-8')
         )
         login_page = self._download_webpage(request, None, note='Logging in as %s' % username)
 
index 2856af96f49cf7928a0dd4fe79ccd287592fb3c1..63065622bd21bf3145dcfae1fa25dc76e99f4f45 100644 (file)
@@ -5,7 +5,7 @@ import re
 import os.path
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     sanitized_Request,
@@ -40,7 +40,7 @@ class PlayedIE(InfoExtractor):
 
         self._sleep(2, video_id)
 
-        post = compat_urllib_parse.urlencode(data)
+        post = compat_urllib_parse_urlencode(data)
         headers = {
             b'Content-Type': b'application/x-www-form-urlencoded',
         }
index e360404f7270ebc607ae1b651fcc50445e811659..1e8096a259ad5568d87b96bd566f646ae641862f 100644 (file)
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..compat import (
     compat_urlparse,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,
@@ -106,7 +106,7 @@ class PlaytvakIE(InfoExtractor):
         })
 
         info_url = compat_urlparse.urlunparse(
-            parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
+            parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
 
         json_info = self._download_json(
             info_url, video_id,
index 12e1c2862c2a7964b5613d7e207853c80a862b24..575775f097e4ad609f1b1fde0c152f07fd2c4a51 100644 (file)
@@ -8,7 +8,7 @@ import collections
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -76,7 +76,7 @@ class PluralsightIE(PluralsightBaseIE):
             post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
 
         request = sanitized_Request(
-            post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            post_url, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
 
         response = self._download_webpage(
index 63ce87ee358004cbfae87b0744bdb02329f31372..9894f32620c1692830df023423ae02a6199121b1 100644 (file)
@@ -2,8 +2,8 @@
 from __future__ import unicode_literals
 
 from ..compat import (
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
 )
 from .common import InfoExtractor
 from ..utils import (
@@ -50,7 +50,7 @@ class Porn91IE(InfoExtractor):
             r'so.addVariable\(\'seccode\',\'([^\']+)\'', webpage, 'sec code')
         max_vid = self._search_regex(
             r'so.addVariable\(\'max_vid\',\'(\d+)\'', webpage, 'max vid')
-        url_params = compat_urllib_parse.urlencode({
+        url_params = compat_urllib_parse_urlencode({
             'VID': file_id,
             'mp4': '1',
             'seccode': sec_code,
index 85aae95765370249023d8202b9d51c44acb99a97..188f0882652f32c3a2eb8cd050f4fdfd3a771cf0 100644 (file)
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     sanitized_Request,
@@ -42,7 +42,7 @@ class PrimeShareTVIE(InfoExtractor):
         self._sleep(wait_time, video_id)
 
         req = sanitized_Request(
-            url, compat_urllib_parse.urlencode(fields), headers)
+            url, compat_urllib_parse_urlencode(fields), headers)
         video_page = self._download_webpage(
             req, video_id, 'Downloading video page')
 
index d5357283addc5e1faafebff51044083eeb4fafa3..67312016c4def2cd604d03b7598012cc5b083c19 100644 (file)
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     determine_ext,
     ExtractorError,
@@ -34,7 +34,7 @@ class PromptFileIE(InfoExtractor):
                                  expected=True)
 
         fields = self._hidden_inputs(webpage)
-        post = compat_urllib_parse.urlencode(fields)
+        post = compat_urllib_parse_urlencode(fields)
         req = sanitized_Request(url, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
         webpage = self._download_webpage(
index 670e6950f3fcc67e78ecd466475e7317c8dfd826..07d49d489d6779b0f6bb7bd12bc610497c576c2e 100644 (file)
@@ -5,9 +5,7 @@ import re
 
 from hashlib import sha1
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-)
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     determine_ext,
@@ -235,7 +233,7 @@ class ProSiebenSat1IE(InfoExtractor):
         client_name = 'kolibri-2.0.19-splec4'
         client_location = url
 
-        videos_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos?%s' % compat_urllib_parse.urlencode({
+        videos_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos?%s' % compat_urllib_parse_urlencode({
             'access_token': access_token,
             'client_location': client_location,
             'client_name': client_name,
@@ -256,7 +254,7 @@ class ProSiebenSat1IE(InfoExtractor):
         client_id = g[:2] + sha1(''.join([clip_id, g, access_token, client_location, g, client_name])
                                  .encode('utf-8')).hexdigest()
 
-        sources_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources?%s' % (clip_id, compat_urllib_parse.urlencode({
+        sources_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources?%s' % (clip_id, compat_urllib_parse_urlencode({
             'access_token': access_token,
             'client_id': client_id,
             'client_location': client_location,
@@ -270,7 +268,7 @@ class ProSiebenSat1IE(InfoExtractor):
                                           client_location, source_ids_str, g, client_name])
                                  .encode('utf-8')).hexdigest()
 
-        url_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url?%s' % (clip_id, compat_urllib_parse.urlencode({
+        url_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url?%s' % (clip_id, compat_urllib_parse_urlencode({
             'access_token': access_token,
             'client_id': client_id,
             'client_location': client_location,
index 1178b7a2781eb8809b99c78cce9fe80a33cafdd6..b4433a68901c2820d2aba843ecaf1b89d32eeaf6 100644 (file)
@@ -2,7 +2,7 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -81,7 +81,7 @@ class ShahidIE(InfoExtractor):
         video = self._download_json(
             '%s/%s/%s?%s' % (
                 api_vars['url'], api_vars['playerType'], api_vars['id'],
-                compat_urllib_parse.urlencode({
+                compat_urllib_parse_urlencode({
                     'apiKey': 'sh@hid0nlin3',
                     'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=',
                 })),
index 96fe0b90d91b080218ca387d8a4403130171358d..e6644199711ab22364909ef3c91a44981449999b 100644 (file)
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
 import base64
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -45,7 +45,7 @@ class SharedIE(InfoExtractor):
 
         download_form = self._hidden_inputs(webpage)
         request = sanitized_Request(
-            url, compat_urllib_parse.urlencode(download_form))
+            url, compat_urllib_parse_urlencode(download_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
 
         video_page = self._download_webpage(
index f1ea9bdb208c79cb59c189c5a3b17a9e1dec460a..61dc1c23539ad8b487cb78d70f0881c910f3fad5 100644 (file)
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     parse_duration,
     sanitized_Request,
@@ -47,7 +47,7 @@ class ShareSixIE(InfoExtractor):
         fields = {
             'method_free': 'Free'
         }
-        post = compat_urllib_parse.urlencode(fields)
+        post = compat_urllib_parse_urlencode(fields)
         req = sanitized_Request(url, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
 
index b2258a0f64c9f985edb5354ae5c11a381ebcc8af..d03f1b1d4308d047e5b690a682587ac5655ce338 100644 (file)
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import sanitized_Request
 
 
@@ -39,7 +39,7 @@ class SinaIE(InfoExtractor):
     ]
 
     def _extract_video(self, video_id):
-        data = compat_urllib_parse.urlencode({'vid': video_id})
+        data = compat_urllib_parse_urlencode({'vid': video_id})
         url_doc = self._download_xml('http://v.iask.com/v_play.php?%s' % data,
                                      video_id, 'Downloading video url')
         image_page = self._download_webpage(
index 015ef75f3e9c36fa6e7447a78721ffd4d0bff74d..b4c6d5bbf7c359f1a02952abcc03f6d0c046dd78 100644 (file)
@@ -7,7 +7,7 @@ import hashlib
 import uuid
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -175,7 +175,7 @@ class SmotriIE(InfoExtractor):
             video_form['pass'] = hashlib.md5(video_password.encode('utf-8')).hexdigest()
 
         request = sanitized_Request(
-            'http://smotri.com/video/view/url/bot/', compat_urllib_parse.urlencode(video_form))
+            'http://smotri.com/video/view/url/bot/', compat_urllib_parse_urlencode(video_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
 
         video = self._download_json(request, video_id, 'Downloading video JSON')
@@ -338,7 +338,7 @@ class SmotriBroadcastIE(InfoExtractor):
             }
 
             request = sanitized_Request(
-                broadcast_url + '/?no_redirect=1', compat_urllib_parse.urlencode(login_form))
+                broadcast_url + '/?no_redirect=1', compat_urllib_parse_urlencode(login_form))
             request.add_header('Content-Type', 'application/x-www-form-urlencoded')
             broadcast_page = self._download_webpage(
                 request, broadcast_id, 'Logging in and confirming age')
index ea8fc258d1e7f361bc2a75360dc4c65774674622..49e5d09ae450d11bb567a2fe95ecba55998c8b42 100644 (file)
@@ -6,7 +6,7 @@ import re
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,
@@ -170,7 +170,7 @@ class SohuIE(InfoExtractor):
                     if retries > 0:
                         download_note += ' (retry #%d)' % retries
                     part_info = self._parse_json(self._download_webpage(
-                        'http://%s/?%s' % (allot, compat_urllib_parse.urlencode(params)),
+                        'http://%s/?%s' % (allot, compat_urllib_parse_urlencode(params)),
                         video_id, download_note), video_id)
 
                     video_url = part_info['url']
index 1efb2b980cc83bc5ffa3b14c912188973b4574b7..2bca8fa3a0db5a9bf419df78844cd8eafb822454 100644 (file)
@@ -11,10 +11,9 @@ from .common import (
 from ..compat import (
     compat_str,
     compat_urlparse,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
-    encode_dict,
     ExtractorError,
     int_or_none,
     unified_strdate,
@@ -393,7 +392,7 @@ class SoundcloudUserIE(SoundcloudIE):
         query = COMMON_QUERY.copy()
         query['offset'] = 0
 
-        next_href = base_url + '?' + compat_urllib_parse.urlencode(query)
+        next_href = base_url + '?' + compat_urllib_parse_urlencode(query)
 
         entries = []
         for i in itertools.count():
@@ -424,7 +423,7 @@ class SoundcloudUserIE(SoundcloudIE):
             qs = compat_urlparse.parse_qs(parsed_next_href.query)
             qs.update(COMMON_QUERY)
             next_href = compat_urlparse.urlunparse(
-                parsed_next_href._replace(query=compat_urllib_parse.urlencode(qs, True)))
+                parsed_next_href._replace(query=compat_urllib_parse_urlencode(qs, True)))
 
         return {
             '_type': 'playlist',
@@ -460,7 +459,7 @@ class SoundcloudPlaylistIE(SoundcloudIE):
         if token:
             data_dict['secret_token'] = token
 
-        data = compat_urllib_parse.urlencode(data_dict)
+        data = compat_urllib_parse_urlencode(data_dict)
         data = self._download_json(
             base_url + data, playlist_id, 'Downloading playlist')
 
@@ -500,7 +499,7 @@ class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE):
         query['client_id'] = self._CLIENT_ID
         query['linked_partitioning'] = '1'
         query['offset'] = 0
-        data = compat_urllib_parse.urlencode(encode_dict(query))
+        data = compat_urllib_parse_urlencode(query)
         next_url = '{0}{1}?{2}'.format(self._API_V2_BASE, endpoint, data)
 
         collected_results = 0
index 77841b94686a27feb00968e5f3b67729504cee6c..b17779e4bd5c1171a6415eb3bdd032c00f78df08 100644 (file)
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import sanitized_Request
 
 
@@ -35,7 +35,7 @@ class StreamcloudIE(InfoExtractor):
             (?:id="[^"]+"\s+)?
             value="([^"]*)"
             ''', orig_webpage)
-        post = compat_urllib_parse.urlencode(fields)
+        post = compat_urllib_parse_urlencode(fields)
 
         self._sleep(12, video_id)
         headers = {
index 2c8e9b9410f09f23349584e7945bed64e5dd3a76..d6b2560f8a8f6a69092f77ca9aa1b8ee4aa9095a 100644 (file)
@@ -5,8 +5,8 @@ import json
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -74,7 +74,7 @@ class TelecincoIE(InfoExtractor):
         info_el = self._download_xml(info_url, episode).find('./video/info')
 
         video_link = info_el.find('videoUrl/link').text
-        token_query = compat_urllib_parse.urlencode({'id': video_link})
+        token_query = compat_urllib_parse_urlencode({'id': video_link})
         token_info = self._download_json(
             embed_data['flashvars']['ov_tk'] + '?' + token_query,
             episode,
index 6d78b5dfea0030f125062ad8ef46a1c6e731e4ea..50ed151636d4e8f46781780799c5fe49a5a2b75d 100644 (file)
@@ -5,7 +5,7 @@ import codecs
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -41,7 +41,7 @@ class TubiTvIE(InfoExtractor):
             'username': username,
             'password': password,
         }
-        payload = compat_urllib_parse.urlencode(form_data).encode('utf-8')
+        payload = compat_urllib_parse_urlencode(form_data).encode('utf-8')
         request = sanitized_Request(self._LOGIN_URL, payload)
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         login_page = self._download_webpage(
index d4169ec6dce32613243baa9ef553a2683178b7ef..c92dcc7b90f151a9ed05e82428dc14691b43e220 100644 (file)
@@ -9,12 +9,11 @@ from .common import InfoExtractor
 from ..compat import (
     compat_parse_qs,
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
     compat_urlparse,
 )
 from ..utils import (
-    encode_dict,
     ExtractorError,
     int_or_none,
     orderedSet,
@@ -82,7 +81,7 @@ class TwitchBaseIE(InfoExtractor):
             post_url = compat_urlparse.urljoin(redirect_url, post_url)
 
         request = sanitized_Request(
-            post_url, compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8'))
+            post_url, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         request.add_header('Referer', redirect_url)
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)
@@ -250,7 +249,7 @@ class TwitchVodIE(TwitchItemBaseIE):
         formats = self._extract_m3u8_formats(
             '%s/vod/%s?%s' % (
                 self._USHER_BASE, item_id,
-                compat_urllib_parse.urlencode({
+                compat_urllib_parse_urlencode({
                     'allow_source': 'true',
                     'allow_audio_only': 'true',
                     'allow_spectre': 'true',
@@ -442,7 +441,7 @@ class TwitchStreamIE(TwitchBaseIE):
         }
         formats = self._extract_m3u8_formats(
             '%s/api/channel/hls/%s.m3u8?%s'
-            % (self._USHER_BASE, channel_id, compat_urllib_parse.urlencode(query)),
+            % (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)),
             channel_id, 'mp4')
         self._prefer_source(formats)
 
index a9046b86554c962c586ea5ab2213e64ec70721ae..6adfb2ceea8b1d15bd124a173e9a702a49e35735 100644 (file)
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..compat import (
     compat_HTTPError,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_request,
     compat_urlparse,
 )
@@ -71,7 +71,7 @@ class UdemyIE(InfoExtractor):
     def _download_lecture(self, course_id, lecture_id):
         return self._download_json(
             'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?%s' % (
-                course_id, lecture_id, compat_urllib_parse.urlencode({
+                course_id, lecture_id, compat_urllib_parse_urlencode({
                     'video_only': '',
                     'auto_play': '',
                     'fields[lecture]': 'title,description,asset',
@@ -139,7 +139,7 @@ class UdemyIE(InfoExtractor):
         })
 
         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         request.add_header('Referer', self._ORIGIN_URL)
         request.add_header('Origin', self._ORIGIN_URL)
 
index b755dda902f3370de27a8f496e7cc4b3b8b76a31..77bb200e960f4ece75c146e3404451eab3470ecb 100644 (file)
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -48,7 +48,7 @@ class Vbox7IE(InfoExtractor):
                                         webpage, 'title').split('/')[0].strip()
 
         info_url = 'http://vbox7.com/play/magare.do'
-        data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id})
+        data = compat_urllib_parse_urlencode({'as3': '1', 'vid': video_id})
         info_request = sanitized_Request(info_url, data)
         info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         info_response = self._download_webpage(info_request, video_id, 'Downloading info webpage')
index 6bfbd4d85d1810d94398111562b4fcbc15ba215f..8d92aee878d3ad0c0d5725db755451c88e527f66 100644 (file)
@@ -2,7 +2,7 @@ from __future__ import unicode_literals
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -93,7 +93,7 @@ class ViddlerIE(InfoExtractor):
         headers = {'Referer': 'http://static.cdn-ec.viddler.com/js/arpeggio/v2/embed.html'}
         request = sanitized_Request(
             'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json?%s'
-            % compat_urllib_parse.urlencode(query), None, headers)
+            % compat_urllib_parse_urlencode(query), None, headers)
         data = self._download_json(request, video_id)['video']
 
         formats = []
index 71c30d2cde54f11802f1e187160ae48c0ea88423..707a5735ad5463fec1d6996db4fd0b381a9205bf 100644 (file)
@@ -12,7 +12,6 @@ from ..compat import (
 )
 from ..utils import (
     determine_ext,
-    encode_dict,
     ExtractorError,
     InAdvancePagedList,
     int_or_none,
@@ -42,13 +41,13 @@ class VimeoBaseInfoExtractor(InfoExtractor):
         self.report_login()
         webpage = self._download_webpage(self._LOGIN_URL, None, False)
         token, vuid = self._extract_xsrft_and_vuid(webpage)
-        data = urlencode_postdata(encode_dict({
+        data = urlencode_postdata({
             'action': 'login',
             'email': username,
             'password': password,
             'service': 'vimeo',
             'token': token,
-        }))
+        })
         login_request = sanitized_Request(self._LOGIN_URL, data)
         login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         login_request.add_header('Referer', self._LOGIN_URL)
@@ -255,10 +254,10 @@ class VimeoIE(VimeoBaseInfoExtractor):
         if password is None:
             raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
         token, vuid = self._extract_xsrft_and_vuid(webpage)
-        data = urlencode_postdata(encode_dict({
+        data = urlencode_postdata({
             'password': password,
             'token': token,
-        }))
+        })
         if url.startswith('http://'):
             # vimeo only supports https now, but the user can give an http url
             url = url.replace('http://', 'https://')
@@ -274,7 +273,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
         password = self._downloader.params.get('videopassword')
         if password is None:
             raise ExtractorError('This video is protected by a password, use the --video-password option')
-        data = urlencode_postdata(encode_dict({'password': password}))
+        data = urlencode_postdata({'password': password})
         pass_url = url + '/check-password'
         password_request = sanitized_Request(pass_url, data)
         password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
@@ -575,7 +574,7 @@ class VimeoChannelIE(VimeoBaseInfoExtractor):
         token, vuid = self._extract_xsrft_and_vuid(webpage)
         fields['token'] = token
         fields['password'] = password
-        post = urlencode_postdata(encode_dict(fields))
+        post = urlencode_postdata(fields)
         password_path = self._search_regex(
             r'action="([^"]+)"', login_form, 'password URL')
         password_url = compat_urlparse.urljoin(page_url, password_path)
index d560a4b5e219c2d62cff17da8e47c3cfbb5f87ba..458099a4ac427baf9dd0f9110276ea4dfc8a8e28 100644 (file)
@@ -7,7 +7,7 @@ import json
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,
@@ -204,7 +204,7 @@ class VKIE(InfoExtractor):
 
         request = sanitized_Request(
             'https://login.vk.com/?act=login',
-            compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            compat_urllib_parse_urlencode(login_form).encode('utf-8'))
         login_page = self._download_webpage(
             request, None, note='Logging in as %s' % username)
 
index bd55451732cea69570753cb7064066537f5600ae..baf39bb2cea714fb1578d80b4b8a83c9cd67e568 100644 (file)
@@ -7,7 +7,7 @@ from ..utils import (
     float_or_none,
     int_or_none,
 )
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 
 
 class VLiveIE(InfoExtractor):
@@ -43,7 +43,7 @@ class VLiveIE(InfoExtractor):
 
         playinfo = self._download_json(
             'http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json?%s'
-            % compat_urllib_parse.urlencode({
+            % compat_urllib_parse_urlencode({
                 'videoId': long_video_id,
                 'key': key,
                 'ptc': 'http',
index a97995a6dfd92383c2d25ea7250142404176ecad..f1abca4d9f2a7b43b2720d00db5b58b5b737c208 100644 (file)
@@ -2,7 +2,7 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     NO_DEFAULT,
@@ -38,7 +38,7 @@ class VodlockerIE(InfoExtractor):
 
         if fields['op'] == 'download1':
             self._sleep(3, video_id)  # they do detect when requests happen too fast!
-            post = compat_urllib_parse.urlencode(fields)
+            post = compat_urllib_parse_urlencode(fields)
             req = sanitized_Request(url, post)
             req.add_header('Content-type', 'application/x-www-form-urlencoded')
             webpage = self._download_webpage(
index 94abdb4f3ed3b99d5ab4f4b60897159d0b16bfc6..4e35e1f4415b86833cc3aa200e0d736441739b5c 100644 (file)
@@ -4,10 +4,9 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
-    encode_dict,
     int_or_none,
     sanitized_Request,
 )
@@ -109,7 +108,7 @@ class XFileShareIE(InfoExtractor):
             if countdown:
                 self._sleep(countdown, video_id)
 
-            post = compat_urllib_parse.urlencode(encode_dict(fields))
+            post = compat_urllib_parse_urlencode(fields)
 
             req = sanitized_Request(url, post)
             req.add_header('Content-type', 'application/x-www-form-urlencoded')
index 4c6142927d38c088c5b03306439bd624f6e209e7..b2d8f4b48daddcf734d3a1fb461d1b92736bcfd1 100644 (file)
@@ -8,6 +8,7 @@ import re
 from .common import InfoExtractor, SearchInfoExtractor
 from ..compat import (
     compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -303,7 +304,7 @@ class YahooIE(InfoExtractor):
         region = self._search_regex(
             r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
             webpage, 'region', fatal=False, default='US')
-        data = compat_urllib_parse.urlencode({
+        data = compat_urllib_parse_urlencode({
             'protocol': 'http',
             'region': region,
         })
index e699e663f60818b090bb6bf0ccdf24802c3c14c4..158f3ea680c786844f3bf4a457ea4bfbc12e4a44 100644 (file)
@@ -7,7 +7,7 @@ import hashlib
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,
@@ -170,7 +170,7 @@ class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE):
             missing_track_ids = set(map(compat_str, track_ids)) - set(present_track_ids)
             request = sanitized_Request(
                 'https://music.yandex.ru/handlers/track-entries.jsx',
-                compat_urllib_parse.urlencode({
+                compat_urllib_parse_urlencode({
                     'entries': ','.join(missing_track_ids),
                     'lang': mu.get('settings', {}).get('lang', 'en'),
                     'external-domain': 'music.yandex.ru',
index 900eb2abac2bcc346eb8b07782ac017a25a44c24..fd7eb5a6d52f8c2ec348d3cfc908dee3d4743f0d 100644 (file)
@@ -8,7 +8,7 @@ import time
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_ord,
 )
 from ..utils import (
@@ -138,7 +138,7 @@ class YoukuIE(InfoExtractor):
                     '_00' + \
                     '/st/' + self.parse_ext_l(format) + \
                     '/fileid/' + get_fileid(format, n) + '?' + \
-                    compat_urllib_parse.urlencode(param)
+                    compat_urllib_parse_urlencode(param)
                 video_urls.append(video_url)
             video_urls_dict[format] = video_urls
 
index 96fa3b5aa7f59a33c5e0346a01a5046f8159822a..83b5840f76c7c9aa76760457e9b87ce86e83e680 100644 (file)
@@ -17,16 +17,15 @@ from ..swfinterp import SWFInterpreter
 from ..compat import (
     compat_chr,
     compat_parse_qs,
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
     compat_urlparse,
     compat_str,
 )
 from ..utils import (
     clean_html,
-    encode_dict,
     error_to_compat_str,
     ExtractorError,
     float_or_none,
@@ -116,7 +115,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             'hl': 'en_US',
         }
 
-        login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')
+        login_data = compat_urllib_parse_urlencode(login_form_strs).encode('ascii')
 
         req = sanitized_Request(self._LOGIN_URL, login_data)
         login_results = self._download_webpage(
@@ -149,7 +148,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
                 'TrustDevice': 'on',
             })
 
-            tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')
+            tfa_data = compat_urllib_parse_urlencode(tfa_form_strs).encode('ascii')
 
             tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
             tfa_results = self._download_webpage(
@@ -1007,7 +1006,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 continue
             sub_formats = []
             for ext in self._SUBTITLE_FORMATS:
-                params = compat_urllib_parse.urlencode({
+                params = compat_urllib_parse_urlencode({
                     'lang': lang,
                     'v': video_id,
                     'fmt': ext,
@@ -1056,7 +1055,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             if caption_url:
                 timestamp = args['timestamp']
                 # We get the available subtitles
-                list_params = compat_urllib_parse.urlencode({
+                list_params = compat_urllib_parse_urlencode({
                     'type': 'list',
                     'tlangs': 1,
                     'asrs': 1,
@@ -1075,7 +1074,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                     sub_lang = lang_node.attrib['lang_code']
                     sub_formats = []
                     for ext in self._SUBTITLE_FORMATS:
-                        params = compat_urllib_parse.urlencode({
+                        params = compat_urllib_parse_urlencode({
                             'lang': original_lang,
                             'tlang': sub_lang,
                             'fmt': ext,
@@ -1094,7 +1093,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             caption_tracks = args['caption_tracks']
             caption_translation_languages = args['caption_translation_languages']
             caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
-            parsed_caption_url = compat_urlparse.urlparse(caption_url)
+            parsed_caption_url = compat_urllib_parse_urlparse(caption_url)
             caption_qs = compat_parse_qs(parsed_caption_url.query)
 
             sub_lang_list = {}
@@ -1110,7 +1109,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                         'fmt': [ext],
                     })
                     sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace(
-                        query=compat_urllib_parse.urlencode(caption_qs, True)))
+                        query=compat_urllib_parse_urlencode(caption_qs, True)))
                     sub_formats.append({
                         'url': sub_url,
                         'ext': ext,
@@ -1140,7 +1139,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'cpn': [cpn],
         })
         playback_url = compat_urlparse.urlunparse(
-            parsed_playback_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
+            parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
 
         self._download_webpage(
             playback_url, video_id, 'Marking watched',
@@ -1225,7 +1224,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             # this can be viewed without login into Youtube
             url = proto + '://www.youtube.com/embed/%s' % video_id
             embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
-            data = compat_urllib_parse.urlencode({
+            data = compat_urllib_parse_urlencode({
                 'video_id': video_id,
                 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                 'sts': self._search_regex(
@@ -2085,7 +2084,7 @@ class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
                 'spf': 'navigate',
             }
             url_query.update(self._EXTRA_QUERY_ARGS)
-            result_url = 'https://www.youtube.com/results?' + compat_urllib_parse.urlencode(url_query)
+            result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
             data = self._download_json(
                 result_url, video_id='query "%s"' % query,
                 note='Downloading page %s' % pagenum,
index b6e1dc8099f8d3e8f513b38303a282000e4932df..eacd81bf9ad3c733c578ace81cd7bf03ebff4725 100644 (file)
@@ -47,6 +47,7 @@ from .compat import (
     compat_str,
     compat_urllib_error,
     compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
     compat_urllib_request,
     compat_urlparse,
@@ -1315,7 +1316,7 @@ def shell_quote(args):
 def smuggle_url(url, data):
     """ Pass additional data in a URL for internal use. """
 
-    sdata = compat_urllib_parse.urlencode(
+    sdata = compat_urllib_parse_urlencode(
         {'__youtubedl_smuggle': json.dumps(data)})
     return url + '#' + sdata
 
@@ -1789,22 +1790,15 @@ def read_batch_urls(batch_fd):
 
 
 def urlencode_postdata(*args, **kargs):
-    return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')
+    return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
 
 
 def update_url_query(url, query):
     parsed_url = compat_urlparse.urlparse(url)
     qs = compat_parse_qs(parsed_url.query)
     qs.update(query)
-    qs = encode_dict(qs)
     return compat_urlparse.urlunparse(parsed_url._replace(
-        query=compat_urllib_parse.urlencode(qs, True)))
-
-
-def encode_dict(d, encoding='utf-8'):
-    def encode(v):
-        return v.encode(encoding) if isinstance(v, compat_basestring) else v
-    return dict((encode(k), encode(v)) for k, v in d.items())
+        query=compat_urllib_parse_urlencode(qs, True)))
 
 
 def dict_get(d, key_or_keys, default=None, skip_false_values=True):
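
For reference, the substitution made at the call sites above is mechanical; a minimal sketch of the new pattern (assuming a checkout containing this revision, with placeholder values and a hypothetical URL) looks like:

    from youtube_dl.compat import compat_urllib_parse_urlencode
    from youtube_dl.utils import urlencode_postdata

    # Query values may be unicode text; no separate pre-encoding pass is needed
    # before building the query string (placeholder data, pair order may vary).
    query = {'vid': 'example-id', 'title': 'пример'}

    qs = compat_urllib_parse_urlencode(query)   # percent-encoded str for a GET query string
    post = urlencode_postdata(query)            # same string, encoded to ASCII bytes for a POST body

    print('http://example.invalid/play?' + qs)
    print(post)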