[googlesearch] Fix start, and skip playlists (Fixes #2329)
author    Philipp Hagemeister <phihag@phihag.de>
          Thu, 6 Feb 2014 02:29:10 +0000 (03:29 +0100)
committer Philipp Hagemeister <phihag@phihag.de>
          Thu, 6 Feb 2014 02:29:10 +0000 (03:29 +0100)
test/test_playlists.py
youtube_dl/extractor/googlesearch.py
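After this change, a gvsearchN: query yields a playlist whose id and title are both the query string and whose entries are capped at N actual videos, with Google playlist hits skipped. A rough usage sketch against YoutubeDL's public API (the option and keyword names below come from that API, not from this diff; it assumes the youtube_dl package is importable):

    import youtube_dl

    ydl = youtube_dl.YoutubeDL({'quiet': True})
    # process=False returns the raw playlist dict produced by the extractor,
    # the same shape the new test below asserts on.
    result = ydl.extract_info('gvsearch15:python language',
                              download=False, process=False)

    print(result['id'])            # 'python language'
    print(result['title'])         # 'python language' (now set alongside the id)
    print(len(result['entries']))  # 15: start pagination fixed, playlists skipped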

diff --git a/test/test_playlists.py b/test/test_playlists.py
index fda2e0112c6d37f0d90b3cb56097c03c12ed6f3c..e0eb0546089c557a945ff0f38a2a65414ed88320 100644
--- a/test/test_playlists.py
+++ b/test/test_playlists.py
@@ -34,6 +34,7 @@ from youtube_dl.extractor import (
     KhanAcademyIE,
     EveryonesMixtapeIE,
     RutubeChannelIE,
+    GoogleSearchIE,
     GenericIE,
 )
 
@@ -240,6 +241,14 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['title'], 'Always/Never: A Little-Seen Movie About Nuclear Command and Control : The New Yorker')
         self.assertEqual(len(result['entries']), 3)
 
+    def test_GoogleSearch(self):
+        dl = FakeYDL()
+        ie = GoogleSearchIE(dl)
+        result = ie.extract('gvsearch15:python language')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'python language')
+        self.assertEqual(result['title'], 'python language')
+        self.assertTrue(len(result['entries']) == 15)
 
 if __name__ == '__main__':
     unittest.main()
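For reference, the playlist dict that test asserts on has this shape (the entry URL below is an illustrative placeholder; real values depend on live Google results):

    expected_shape = {
        '_type': 'playlist',
        'id': 'python language',
        'title': 'python language',   # new in this commit: the title mirrors the query
        'entries': [
            {'_type': 'url', 'url': 'https://www.youtube.com/watch?v=example'},
            # ... up to 15 url-type entries, capped by entries[:n]
        ],
    }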
diff --git a/youtube_dl/extractor/googlesearch.py b/youtube_dl/extractor/googlesearch.py
index f9c88e9b5328a035cda2abe1014a015fceff2ed7..5c25642702993f1ec344ce9a0d4967fffedc760a 100644
--- a/youtube_dl/extractor/googlesearch.py
+++ b/youtube_dl/extractor/googlesearch.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import itertools
 import re
 
@@ -8,32 +10,42 @@ from ..utils import (
 
 
 class GoogleSearchIE(SearchInfoExtractor):
-    IE_DESC = u'Google Video search'
-    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
+    IE_DESC = 'Google Video search'
     _MAX_RESULTS = 1000
-    IE_NAME = u'video.google:search'
+    IE_NAME = 'video.google:search'
     _SEARCH_KEY = 'gvsearch'
 
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
 
+        entries = []
         res = {
             '_type': 'playlist',
             'id': query,
-            'entries': []
+            'title': query,
         }
 
-        for pagenum in itertools.count(1):
-            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
-            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
-                                             note='Downloading result page ' + str(pagenum))
+        for pagenum in itertools.count():
+            result_url = (
+                'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
+                % (compat_urllib_parse.quote_plus(query), pagenum * 10))
+
+            webpage = self._download_webpage(
+                result_url, 'gvsearch:' + query,
+                note='Downloading result page ' + str(pagenum + 1))
+
+            for hit_idx, mobj in enumerate(re.finditer(
+                    r'<h3 class="r"><a href="([^"]+)"', webpage)):
+
+                # Skip playlists
+                if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
+                    continue
 
-            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
-                e = {
+                entries.append({
                     '_type': 'url',
                     'url': mobj.group(1)
-                }
-                res['entries'].append(e)
+                })
 
-            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
+            if (len(entries) >= n) or not re.search(r'class="pn" id="pnnext"', webpage):
+                res['entries'] = entries[:n]
                 return res
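
Taken on its own, the pagination and playlist-skipping logic the second hunk introduces amounts to the following standalone sketch (fetch_page stands in for youtube-dl's _download_webpage helper and is not part of the patch):

    import itertools
    import re

    def collect_google_video_results(fetch_page, query, n):
        # Page numbers count from 0 so that Google's 'start' parameter is
        # pagenum * 10 (the "fix start" half of the commit message).
        entries = []
        for pagenum in itertools.count():
            webpage = fetch_page(query, start=pagenum * 10)

            for hit_idx, mobj in enumerate(re.finditer(
                    r'<h3 class="r"><a href="([^"]+)"', webpage)):
                # Skip playlists: only real video hits come with an
                # id="vidthumbN" thumbnail anchor in the result page.
                if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
                    continue
                entries.append({'_type': 'url', 'url': mobj.group(1)})

            # Stop once enough entries are collected or there is no "next"
            # link, and cap the playlist at exactly n entries.
            if len(entries) >= n or not re.search(r'class="pn" id="pnnext"', webpage):
                return {
                    '_type': 'playlist',
                    'id': query,
                    'title': query,
                    'entries': entries[:n],
                }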