gargantext / searx-engine

Commit 4689fe34
Authored May 02, 2015 by Alexandre Flament
Parent: bbd83f5a

update versions.cfg to use the current up-to-date packages

Showing 40 changed files with 483 additions and 395 deletions (+483 / -395)

Changed files:

searx/autocomplete.py              +1   -1
searx/engines/__init__.py          +2   -2
searx/engines/bing.py              +14  -12
searx/engines/bing_images.py       +17  -15
searx/engines/bing_news.py         +14  -10
searx/engines/blekko_images.py     +11  -9
searx/engines/btdigg.py            +11  -9
searx/engines/dailymotion.py       +14  -12
searx/engines/deezer.py            +11  -9
searx/engines/deviantart.py        +13  -11
searx/engines/digg.py              +11  -9
searx/engines/duckduckgo.py        +16  -14
searx/engines/dummy.py             +6   -4
searx/engines/faroo.py             +11  -9
searx/engines/flickr.py            +14  -12
searx/engines/flickr_noapi.py      +13  -11
searx/engines/generalfile.py       +13  -11
searx/engines/gigablast.py         +11  -9
searx/engines/github.py            +12  -10
searx/engines/google_images.py     +12  -10
searx/engines/google_news.py       +12  -10
searx/engines/json_engine.py       +1   -1
searx/engines/kickass.py           +11  -9
searx/engines/mediawiki.py         +13  -11
searx/engines/mixcloud.py          +11  -9
searx/engines/openstreetmap.py     +11  -9
searx/engines/photon.py            +11  -9
searx/engines/searchcode_code.py   +11  -9
searx/engines/searchcode_doc.py    +11  -9
searx/engines/soundcloud.py        +11  -9
searx/engines/spotify.py           +11  -9
searx/engines/stackoverflow.py     +11  -9
searx/engines/subtitleseeker.py    +11  -9
searx/engines/twitter.py           +13  -11
searx/engines/www1x.py             +10  -9
searx/engines/www500px.py          +13  -11
searx/engines/yacy.py              +1   -1
searx/engines/yahoo.py             +12  -10
searx/engines/youtube.py           +2   -2
versions.cfg                       +69  -50

searx/autocomplete.py

@@ -28,7 +28,7 @@ from searx.poolrequests import get as http_get

 def get(*args, **kwargs):
-    if not 'timeout' in kwargs:
+    if 'timeout' not in kwargs:
         kwargs['timeout'] = settings['server']['request_timeout']
     return http_get(*args, **kwargs)

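The same membership-test cleanup recurs below in dailymotion.py, flickr.py, github.py and youtube.py. As a minimal sketch (not part of the commit), the two spellings are equivalent; the rewrite only switches to the operator form that newer pep8/flake8 releases prefer (rule E713):

# Minimal sketch, not part of the commit: both spellings test membership
# identically; recent pep8/flake8 releases flag the first form as E713.
kwargs = {'verify': True}

old_style = not 'timeout' in kwargs   # what the engines used before
new_style = 'timeout' not in kwargs   # what this commit switches to

assert old_style == new_style         # holds for any kwargs content
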
searx/engines/__init__.py

@@ -86,7 +86,7 @@ def load_engine(engine_data):
            continue
        if getattr(engine, engine_attr) is None:
            logger.error('Missing engine config attribute: "{0}.{1}"'
-               .format(engine.name, engine_attr))
+                        .format(engine.name, engine_attr))
            sys.exit(1)
    engine.stats = {

@@ -106,7 +106,7 @@ def load_engine(engine_data):
    if engine.shortcut:
        if engine.shortcut in engine_shortcuts:
            logger.error('Engine config error: ambigious shortcut: {0}'
-               .format(engine.shortcut))
+                        .format(engine.shortcut))
            sys.exit(1)
        engine_shortcuts[engine.shortcut] = engine.name
    return engine

searx/engines/bing.py

-## Bing (Web)
-#
-# @website     https://www.bing.com
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
-#              max. 5000 query/month
-#
-# @using-api   no (because of query limit)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content
-#
-# @todo        publishedDate
+"""
+ Bing (Web)
+
+ @website     https://www.bing.com
+ @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+              max. 5000 query/month
+
+ @using-api   no (because of query limit)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content
+
+ @todo        publishedDate
+"""

 from urllib import urlencode
 from cgi import escape

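Every engine below repeats this pattern: the leading '##' comment header is turned into a module docstring with the same content. A minimal sketch, not from the commit, of what the conversion buys (the json module stands in here for any converted engine module):

# Minimal sketch, not part of the commit. The json module is only a stand-in;
# the point is the same for a converted module such as searx.engines.bing.
import json

# A module docstring survives compilation and can be read back at runtime:
print(json.__doc__.splitlines()[0])

# A leading '#' comment block, as the engines used before this commit, is
# discarded by the parser and is only visible in the source file itself.
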
searx/engines/bing_images.py

-## Bing (Images)
-#
-# @website     https://www.bing.com/images
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
-#              max. 5000 query/month
-#
-# @using-api   no (because of query limit)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, img_src
-#
-# @todo        currently there are up to 35 images receive per page,
-#              because bing does not parse count=10.
-#              limited response to 10 images
+"""
+ Bing (Images)
+
+ @website     https://www.bing.com/images
+ @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+              max. 5000 query/month
+
+ @using-api   no (because of query limit)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, img_src
+
+ @todo        currently there are up to 35 images receive per page,
+              because bing does not parse count=10.
+              limited response to 10 images
+"""

 from urllib import urlencode
 from lxml import html

@@ -76,7 +78,7 @@ def response(resp):
         title = link.attrib.get('t1')
         ihk = link.attrib.get('ihk')

-        #url = 'http://' + link.attrib.get('t3')
+        # url = 'http://' + link.attrib.get('t3')
         url = yaml_data.get('surl')
         img_src = yaml_data.get('imgurl')

searx/engines/bing_news.py

-## Bing (News)
-#
-# @website     https://www.bing.com/news
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
-#              max. 5000 query/month
-#
-# @using-api   no (because of query limit)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content, publishedDate
+"""
+ Bing (News)
+
+ @website     https://www.bing.com/news
+ @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+              max. 5000 query/month
+
+ @using-api   no (because of query limit)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content, publishedDate
+"""

 from urllib import urlencode
 from cgi import escape

@@ -87,6 +89,8 @@ def response(resp):
             publishedDate = parser.parse(publishedDate, dayfirst=False)
         except TypeError:
             publishedDate = datetime.now()
+        except ValueError:
+            publishedDate = datetime.now()

         # append result
         results.append({'url': url,

searx/engines/blekko_images.py

-## Blekko (Images)
-#
-# @website     https://blekko.com
-# @provide-api yes (inofficial)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, img_src
+"""
+ Blekko (Images)
+
+ @website     https://blekko.com
+ @provide-api yes (inofficial)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, img_src
+"""

 from json import loads
 from urllib import urlencode

searx/engines/btdigg.py

-## BTDigg (Videos, Music, Files)
-#
-# @website     https://btdigg.org
-# @provide-api yes (on demand)
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content, seed, leech, magnetlink
+"""
+ BTDigg (Videos, Music, Files)
+
+ @website     https://btdigg.org
+ @provide-api yes (on demand)
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content, seed, leech, magnetlink
+"""

 from urlparse import urljoin
 from cgi import escape

searx/engines/dailymotion.py

-## Dailymotion (Videos)
-#
-# @website     https://www.dailymotion.com
-# @provide-api yes (http://www.dailymotion.com/developer)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, thumbnail, publishedDate, embedded
-#
-# @todo        set content-parameter with correct data
+"""
+ Dailymotion (Videos)
+
+ @website     https://www.dailymotion.com
+ @provide-api yes (http://www.dailymotion.com/developer)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, thumbnail, publishedDate, embedded
+
+ @todo        set content-parameter with correct data
+"""

 from urllib import urlencode
 from json import loads

@@ -48,7 +50,7 @@ def response(resp):
     search_res = loads(resp.text)

     # return empty array if there are no results
-    if not 'list' in search_res:
+    if 'list' not in search_res:
         return []

     # parse results

searx/engines/deezer.py

-## Deezer (Music)
-#
-# @website     https://deezer.com
-# @provide-api yes (http://developers.deezer.com/api/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, embedded
+"""
+ Deezer (Music)
+
+ @website     https://deezer.com
+ @provide-api yes (http://developers.deezer.com/api/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, embedded
+"""

 from json import loads
 from urllib import urlencode

searx/engines/deviantart.py

-## Deviantart (Images)
-#
-# @website     https://www.deviantart.com/
-# @provide-api yes (https://www.deviantart.com/developers/) (RSS)
-#
-# @using-api   no (TODO, rewrite to api)
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, thumbnail_src, img_src
-#
-# @todo        rewrite to api
+"""
+ Deviantart (Images)
+
+ @website     https://www.deviantart.com/
+ @provide-api yes (https://www.deviantart.com/developers/) (RSS)
+
+ @using-api   no (TODO, rewrite to api)
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, thumbnail_src, img_src
+
+ @todo        rewrite to api
+"""

 from urllib import urlencode
 from urlparse import urljoin

searx/engines/digg.py

-## Digg (News, Social media)
-#
-# @website     https://digg.com/
-# @provide-api no
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content, publishedDate, thumbnail
+"""
+ Digg (News, Social media)
+
+ @website     https://digg.com/
+ @provide-api no
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content, publishedDate, thumbnail
+"""

 from urllib import quote_plus
 from json import loads

searx/engines/duckduckgo.py

-## DuckDuckGo (Web)
-#
-# @website     https://duckduckgo.com/
-# @provide-api yes (https://duckduckgo.com/api),
-#              but not all results from search-site
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content
-#
-# @todo        rewrite to api
-# @todo        language support
-#              (the current used site does not support language-change)
+"""
+ DuckDuckGo (Web)
+
+ @website     https://duckduckgo.com/
+ @provide-api yes (https://duckduckgo.com/api),
+              but not all results from search-site
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content
+
+ @todo        rewrite to api
+ @todo        language support
+              (the current used site does not support language-change)
+"""

 from urllib import urlencode
 from lxml.html import fromstring

searx/engines/dummy.py

-## Dummy
-#
-# @results     empty array
-# @stable      yes
+"""
+ Dummy
+
+ @results     empty array
+ @stable      yes
+"""

 # do search-request

searx/engines/faroo.py

-## Faroo (Web, News)
-#
-# @website     http://www.faroo.com
-# @provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, publishedDate, img_src
+"""
+ Faroo (Web, News)
+
+ @website     http://www.faroo.com
+ @provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, publishedDate, img_src
+"""

 from urllib import urlencode
 from json import loads

searx/engines/flickr.py

 #!/usr/bin/env python
-## Flickr (Images)
-#
-# @website     https://www.flickr.com
-# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, thumbnail, img_src
-#More info on api-key : https://www.flickr.com/services/apps/create/
+"""
+ Flickr (Images)
+
+ @website     https://www.flickr.com
+ @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, thumbnail, img_src
+ More info on api-key : https://www.flickr.com/services/apps/create/
+"""

 from urllib import urlencode
 from json import loads

@@ -48,10 +50,10 @@ def response(resp):
     search_results = loads(resp.text)

     # return empty array if there are no results
-    if not 'photos' in search_results:
+    if 'photos' not in search_results:
         return []

-    if not 'photo' in search_results['photos']:
+    if 'photo' not in search_results['photos']:
         return []

     photos = search_results['photos']['photo']

searx/engines/flickr_noapi.py

 #!/usr/bin/env python
-# Flickr (Images)
-#
-# @website     https://www.flickr.com
-# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
-#
-# @using-api   no
-# @results     HTML
-# @stable      no
-# @parse       url, title, thumbnail, img_src
+"""
+ Flickr (Images)
+
+ @website     https://www.flickr.com
+ @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
+
+ @using-api   no
+ @results     HTML
+ @stable      no
+ @parse       url, title, thumbnail, img_src
+"""

 from urllib import urlencode
 from json import loads

@@ -20,8 +22,8 @@ logger = logger.getChild('flickr-noapi')
 categories = ['images']

-url = 'https://secure.flickr.com/'
-search_url = url + 'search/?{query}&page={page}'
+url = 'https://www.flickr.com/'
+search_url = url + 'search?{query}&page={page}'
 photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
 regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
 image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')

searx/engines/generalfile.py

-## General Files (Files)
-#
-# @website     http://www.general-files.org
-# @provide-api no (nothing found)
-#
-# @using-api   no (because nothing found)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content
-#
-# @todo        detect torrents?
+"""
+ General Files (Files)
+
+ @website     http://www.general-files.org
+ @provide-api no (nothing found)
+
+ @using-api   no (because nothing found)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content
+
+ @todo        detect torrents?
+"""

 from lxml import html

searx/engines/gigablast.py

-## Gigablast (Web)
-#
-# @website     http://gigablast.com
-# @provide-api yes (http://gigablast.com/api.html)
-#
-# @using-api   yes
-# @results     XML
-# @stable      yes
-# @parse       url, title, content
+"""
+ Gigablast (Web)
+
+ @website     http://gigablast.com
+ @provide-api yes (http://gigablast.com/api.html)
+
+ @using-api   yes
+ @results     XML
+ @stable      yes
+ @parse       url, title, content
+"""

 from urllib import urlencode
 from cgi import escape

searx/engines/github.py

-## Github (It)
-#
-# @website     https://github.com/
-# @provide-api yes (https://developer.github.com/v3/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes (using api)
-# @parse       url, title, content
+"""
+ Github (It)
+
+ @website     https://github.com/
+ @provide-api yes (https://developer.github.com/v3/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes (using api)
+ @parse       url, title, content
+"""

 from urllib import urlencode
 from json import loads

@@ -37,7 +39,7 @@ def response(resp):
     search_res = loads(resp.text)

     # check if items are recieved
-    if not 'items' in search_res:
+    if 'items' not in search_res:
         return []

     # parse results

searx/engines/google_images.py

-## Google (Images)
-#
-# @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/),
-#              deprecated!
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes (but deprecated)
-# @parse       url, title, img_src
+"""
+ Google (Images)
+
+ @website     https://www.google.com
+ @provide-api yes (https://developers.google.com/web-search/docs/),
+              deprecated!
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes (but deprecated)
+ @parse       url, title, img_src
+"""

 from urllib import urlencode, unquote
 from json import loads

searx/engines/google_news.py

-## Google (News)
-#
-# @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/),
-#              deprecated!
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes (but deprecated)
-# @parse       url, title, content, publishedDate
+"""
+ Google (News)
+
+ @website     https://www.google.com
+ @provide-api yes (https://developers.google.com/web-search/docs/),
+              deprecated!
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes (but deprecated)
+ @parse       url, title, content, publishedDate
+"""

 from urllib import urlencode
 from json import loads

searx/engines/json_engine.py

@@ -6,7 +6,7 @@ search_url = None
 url_query = None
 content_query = None
 title_query = None
-#suggestion_xpath = ''
+# suggestion_xpath = ''


 def iterate(iterable):

searx/engines/kickass.py

-## Kickass Torrent (Videos, Music, Files)
-#
-# @website     https://kickass.so
-# @provide-api no (nothing found)
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      yes (HTML can change)
-# @parse       url, title, content, seed, leech, magnetlink
+"""
+ Kickass Torrent (Videos, Music, Files)
+
+ @website     https://kickass.so
+ @provide-api no (nothing found)
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      yes (HTML can change)
+ @parse       url, title, content, seed, leech, magnetlink
+"""

 from urlparse import urljoin
 from cgi import escape

searx/engines/mediawiki.py

-## general mediawiki-engine (Web)
-#
-# @website     websites built on mediawiki (https://www.mediawiki.org)
-# @provide-api yes (http://www.mediawiki.org/wiki/API:Search)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title
-#
-# @todo        content
+"""
+ general mediawiki-engine (Web)
+
+ @website     websites built on mediawiki (https://www.mediawiki.org)
+ @provide-api yes (http://www.mediawiki.org/wiki/API:Search)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title
+
+ @todo        content
+"""

 from json import loads
 from string import Formatter

searx/engines/mixcloud.py

-## Mixcloud (Music)
-#
-# @website     https://http://www.mixcloud.com/
-# @provide-api yes (http://www.mixcloud.com/developers/
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, embedded, publishedDate
+"""
+ Mixcloud (Music)
+
+ @website     https://http://www.mixcloud.com/
+ @provide-api yes (http://www.mixcloud.com/developers/
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, embedded, publishedDate
+"""

 from json import loads
 from urllib import urlencode

searx/engines/openstreetmap.py

-## OpenStreetMap (Map)
-#
-# @website     https://openstreetmap.org/
-# @provide-api yes (http://wiki.openstreetmap.org/wiki/Nominatim)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title
+"""
+ OpenStreetMap (Map)
+
+ @website     https://openstreetmap.org/
+ @provide-api yes (http://wiki.openstreetmap.org/wiki/Nominatim)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title
+"""

 from json import loads
 from searx.utils import searx_useragent

searx/engines/photon.py

-## Photon (Map)
-#
-# @website     https://photon.komoot.de
-# @provide-api yes (https://photon.komoot.de/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title
+"""
+ Photon (Map)
+
+ @website     https://photon.komoot.de
+ @provide-api yes (https://photon.komoot.de/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title
+"""

 from urllib import urlencode
 from json import loads

searx/engines/searchcode_code.py

-## Searchcode (It)
-#
-# @website     https://searchcode.com/
-# @provide-api yes (https://searchcode.com/api/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content
+"""
+ Searchcode (It)
+
+ @website     https://searchcode.com/
+ @provide-api yes (https://searchcode.com/api/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content
+"""

 from urllib import urlencode
 from json import loads

searx/engines/searchcode_doc.py

-## Searchcode (It)
-#
-# @website     https://searchcode.com/
-# @provide-api yes (https://searchcode.com/api/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content
+"""
+ Searchcode (It)
+
+ @website     https://searchcode.com/
+ @provide-api yes (https://searchcode.com/api/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content
+"""

 from urllib import urlencode
 from json import loads

searx/engines/soundcloud.py

-## Soundcloud (Music)
-#
-# @website     https://soundcloud.com
-# @provide-api yes (https://developers.soundcloud.com/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, publishedDate, embedded
+"""
+ Soundcloud (Music)
+
+ @website     https://soundcloud.com
+ @provide-api yes (https://developers.soundcloud.com/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, publishedDate, embedded
+"""

 from json import loads
 from urllib import urlencode, quote_plus

searx/engines/spotify.py

-## Spotify (Music)
-#
-# @website     https://spotify.com
-# @provide-api yes (https://developer.spotify.com/web-api/search-item/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, embedded
+"""
+ Spotify (Music)
+
+ @website     https://spotify.com
+ @provide-api yes (https://developer.spotify.com/web-api/search-item/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, embedded
+"""

 from json import loads
 from urllib import urlencode

searx/engines/stackoverflow.py

-## Stackoverflow (It)
-#
-# @website     https://stackoverflow.com/
-# @provide-api not clear (https://api.stackexchange.com/docs/advanced-search)
-#
-# @using-api   no
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, content
+"""
+ Stackoverflow (It)
+
+ @website     https://stackoverflow.com/
+ @provide-api not clear (https://api.stackexchange.com/docs/advanced-search)
+
+ @using-api   no
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, content
+"""

 from urlparse import urljoin
 from cgi import escape

searx/engines/subtitleseeker.py

-## Subtitleseeker (Video)
-#
-# @website     http://www.subtitleseeker.com
-# @provide-api no
-#
-# @using-api   no
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, content
+"""
+ Subtitleseeker (Video)
+
+ @website     http://www.subtitleseeker.com
+ @provide-api no
+
+ @using-api   no
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, content
+"""

 from cgi import escape
 from urllib import quote_plus

searx/engines/twitter.py

-## Twitter (Social media)
-#
-# @website     https://twitter.com/
-# @provide-api yes (https://dev.twitter.com/docs/using-search)
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content
-#
-# @todo        publishedDate
+"""
+ Twitter (Social media)
+
+ @website     https://twitter.com/
+ @provide-api yes (https://dev.twitter.com/docs/using-search)
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content
+
+ @todo        publishedDate
+"""

 from urlparse import urljoin
 from urllib import urlencode

searx/engines/www1x.py

-## 1x (Images)
-#
-# @website     http://1x.com/
-# @provide-api no
-#
-# @using-api   no
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, thumbnail, img_src, content
+"""
+ 1x (Images)
+
+ @website     http://1x.com/
+ @provide-api no
+
+ @using-api   no
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, thumbnail, img_src, content
+"""

 from urllib import urlencode
 from urlparse import urljoin

searx/engines/www500px.py

-## 500px (Images)
-#
-# @website     https://500px.com
-# @provide-api yes (https://developers.500px.com/)
-#
-# @using-api   no
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, thumbnail, img_src, content
-#
-# @todo        rewrite to api
+"""
+ 500px (Images)
+
+ @website     https://500px.com
+ @provide-api yes (https://developers.500px.com/)
+
+ @using-api   no
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, thumbnail, img_src, content
+
+ @todo        rewrite to api
+"""

 from urllib import urlencode

searx/engines/yacy.py
View file @
4689fe34
#
#
Yacy (Web, Images, Videos, Music, Files)
# Yacy (Web, Images, Videos, Music, Files)
#
# @website http://yacy.net
# @provide-api yes
...
...
searx/engines/yahoo.py

-## Yahoo (Web)
-#
-# @website     https://search.yahoo.com/web
-# @provide-api yes (https://developer.yahoo.com/boss/search/),
-#              $0.80/1000 queries
-#
-# @using-api   no (because pricing)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content, suggestion
+"""
+ Yahoo (Web)
+
+ @website     https://search.yahoo.com/web
+ @provide-api yes (https://developer.yahoo.com/boss/search/),
+              $0.80/1000 queries
+
+ @using-api   no (because pricing)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content, suggestion
+"""

 from urllib import urlencode
 from urlparse import unquote

searx/engines/youtube.py
View file @
4689fe34
#
#
Youtube (Videos)
# Youtube (Videos)
#
# @website https://www.youtube.com/
# @provide-api yes (http://gdata-samples-youtube-search-py.appspot.com/)
...
...
@@ -47,7 +47,7 @@ def response(resp):
search_results
=
loads
(
resp
.
text
)
# return empty array if there are no results
if
not
'feed'
in
search_results
:
if
'feed'
not
in
search_results
:
return
[]
feed
=
search_results
[
'feed'
]
...
...
versions.cfg

@@ -2,96 +2,115 @@
 Babel = 1.3
 Flask = 0.10.1
 Flask-Babel = 0.9
-Jinja2 = 2.7.2
-MarkupSafe = 0.18
-Pygments = 2.0.1
-WebOb = 1.3.1
-WebTest = 2.0.11
-Werkzeug = 0.9.4
+Jinja2 = 2.7.3
+MarkupSafe = 0.23
+Pygments = 2.0.2
+WebOb = 1.4.1
+WebTest = 2.0.18
+Werkzeug = 0.10.4
 buildout-versions = 1.7
 collective.recipe.omelette = 0.16
 coverage = 3.7.1
-decorator = 3.4.0
-docutils = 0.11
-flake8 = 2.1.0
-itsdangerous = 0.23
-mccabe = 0.2.1
+decorator = 3.4.2
+docutils = 0.12
+flake8 = 2.4.0
+itsdangerous = 0.24
+mccabe = 0.3
 mock = 1.0.1
-pep8 = 1.4.6
-plone.testing = 4.0.8
-pyflakes = 0.7.3
-pytz = 2013b
-pyyaml = 3.10
-requests = 2.5.3
+pep8 = 1.5.7
+plone.testing = 4.0.13
+pyflakes = 0.8.1
+pytz = 2015.2
+pyyaml = 3.11
+requests = 2.6.2
 robotframework-debuglibrary = 0.3
 robotframework-httplibrary = 0.4.2
-robotframework-selenium2library = 1.5.0
-robotsuite = 1.4.2
-selenium = 2.39.0
+robotframework-selenium2library = 1.6.0
+robotsuite = 1.6.1
+selenium = 2.45.0
 speaklater = 1.3
-unittest2 = 0.5.1
-waitress = 0.8.8
+unittest2 = 1.0.1
+waitress = 0.8.9
 zc.recipe.testrunner = 2.0.0
+pyopenssl = 0.15.1
+ndg-httpsclient = 0.3.3
+pyasn1 = 0.1.7
+pyasn1-modules = 0.0.5
-certifi = 14.05.14
+certifi = 2015.04.28
+
+#
+cffi = 0.9.2
+cryptography = 0.8.2

 # Required by:
-# WebTest==2.0.11
+# WebTest==2.0.18
 beautifulsoup4 = 4.3.2

+# Required by:
+# cryptography==0.8.2
+enum34 = 1.0.4
+
 # Required by:
 # robotframework-httplibrary==0.4.2
-jsonpatch = 1.3
+jsonpatch = 1.9

 # Required by:
 # robotframework-httplibrary==0.4.2
-jsonpointer = 1.1
+jsonpointer = 1.7

+# Required by:
+# traceback2==1.4.0
+linecache2 = 1.0.0
+
 # Required by:
-# robotsuite==1.4.2
-# searx==0.1
-lxml = 3.2.5
+# robotsuite==1.6.1
+# searx==0.7.0
+lxml = 3.4.4

+# Required by:
+# cffi==0.9.2
+pycparser = 2.12
+
+# Required by:
+# searx==0.7.0
+python-dateutil = 2.4.2
+
 # Required by:
 # robotframework-httplibrary==0.4.2
-robotframework = 2.8.3
+robotframework = 2.8.7

 # Required by:
-# plone.testing==4.0.8
-# robotsuite==1.4.2
-# searx==0.1
-# zope.exceptions==4.0.6
-# zope.interface==4.0.5
-# zope.testrunner==4.4.1
-setuptools = 2.1
+# searx==0.7.0
+# zope.exceptions==4.0.7
+# zope.interface==4.1.2
+# zope.testrunner==4.4.8
+setuptools = 15.2

 # Required by:
-# zope.testrunner==4.4.1
-six = 1.6.1
+# robotsuite==1.6.1
+# zope.testrunner==4.4.8
+six = 1.9.0

+# Required by:
+# unittest2==1.0.1
+traceback2 = 1.4.0
+
 # Required by:
 # collective.recipe.omelette==0.16
 zc.recipe.egg = 2.0.1

 # Required by:
-# zope.testrunner==4.4.1
-zope.exceptions = 4.0.6
+# zope.testrunner==4.4.8
+zope.exceptions = 4.0.7

 # Required by:
-# zope.testrunner==4.4.1
-zope.interface = 4.0.5
+# zope.testrunner==4.4.8
+zope.interface = 4.1.2

 # Required by:
-# plone.testing==4.0.8
-zope.testing = 4.1.2
+# plone.testing==4.0.13
+zope.testing = 4.1.3

 # Required by:
 # zc.recipe.testrunner==2.0.0
-zope.testrunner = 4.4.1
+zope.testrunner = 4.4.8

-# Required by:
-# searx==0.3.0
-python-dateutil = 2.2
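
versions.cfg is a zc.buildout pin file: each 'name = version' line fixes the exact release buildout may install, which is why bumping flake8 to 2.4.0, pep8 to 1.5.7 and pyflakes to 0.8.1 goes hand in hand with the style fixes in the engine files above. A minimal Python 2 sketch, not part of the commit, showing how such an INI-style pin file can be read back; the [versions] section header is assumed here as the usual buildout convention, since the hunk above starts at line 2 of the file:

# Minimal sketch, not part of the commit: read pins from an INI-style
# [versions] section, the way buildout consumes versions.cfg.
from ConfigParser import ConfigParser   # Python 2, matching the code base
from StringIO import StringIO

pins = """\
[versions]
requests = 2.6.2
flake8 = 2.4.0
"""

parser = ConfigParser()
parser.readfp(StringIO(pins))
for name, version in parser.items('versions'):
    print('%s is pinned to %s' % (name, version))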