gargantext / searx-engine / Commits

Commit a508d540, authored Dec 16, 2014 by Thomas Pointhuber

    [fix] pep8

parent 91f99732

Showing 6 changed files with 35 additions and 17 deletions (+35 -17)
searx/engines/bing_news.py       +8  -3
searx/engines/faroo.py           +11 -4
searx/engines/google_images.py   +1  -1
searx/engines/kickass.py         +4  -4
searx/engines/soundcloud.py      +6  -1
searx/engines/yahoo.py           +5  -4
searx/engines/bing_news.py

@@ -57,12 +57,16 @@ def response(resp):
         link = result.xpath('.//div[@class="newstitle"]/a')[0]
         url = link.attrib.get('href')
         title = ' '.join(link.xpath('.//text()'))
-        contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')
+        contentXPath = result.xpath('.//div[@class="sn_txt"]/div'
+                                    '//span[@class="sn_snip"]//text()')
         if contentXPath is not None:
             content = escape(' '.join(contentXPath))

         # parse publishedDate
-        publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()')
+        publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div'
+                                          '//span[contains(@class,"sn_ST")]'
+                                          '//span[contains(@class,"sn_tm")]'
+                                          '//text()')
         if publishedDateXPath is not None:
             publishedDate = escape(' '.join(publishedDateXPath))
@@ -74,7 +78,8 @@ def response(resp):
             timeNumbers = re.findall(r'\d+', publishedDate)
             publishedDate = datetime.now()\
                 - timedelta(hours=int(timeNumbers[0]))
-        elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate):
+        elif re.match("^[0-9]+ hour(s|),"
+                      " [0-9]+ minute(s|) ago$", publishedDate):
             timeNumbers = re.findall(r'\d+', publishedDate)
             publishedDate = datetime.now()\
                 - timedelta(hours=int(timeNumbers[0]))\
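The reformatted XPath strings rely on Python's implicit concatenation of adjacent string literals, so the multi-line version passes exactly the same expression to lxml as the old single-line string did. A minimal sketch of the idea (variable names here are illustrative, not taken from the engine):

```python
# Adjacent string literals are joined at compile time, so splitting a long
# XPath over several lines does not change the value handed to lxml.
split_xpath = ('.//div[@class="sn_txt"]/div'
               '//span[@class="sn_snip"]//text()')
single_line = './/div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()'
assert split_xpath == single_line
```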
searx/engines/faroo.py

@@ -22,10 +22,17 @@ api_key = None
 # search-url
 url = 'http://www.faroo.com/'
-search_url = url + 'api?{query}&start={offset}&length={number_of_results}&l={language}&src={categorie}&i=false&f=json&key={api_key}'
+search_url = url + 'api?{query}'\
+    '&start={offset}'\
+    '&length={number_of_results}'\
+    '&l={language}'\
+    '&src={categorie}'\
+    '&i=false'\
+    '&f=json'\
+    '&key={api_key}'  # noqa

 search_category = {'general': 'web',
-                    'news': 'news'}
+                   'news': 'news'}

 # do search-request
@@ -80,8 +87,8 @@ def response(resp):
     # parse results
     for result in search_res['results']:
         if result['news']:
-            # timestamp (how many milliseconds have passed between now and the beginning of 1970)
-            publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0)
+            # timestamp (milliseconds since 1970)
+            publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0)  # noqa

             # append news result
             results.append({'url': result['url'],
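Here the long query template is broken up with backslash continuations between adjacent literals, and the trailing `# noqa` marker asks the style checker (pep8/flake8) to skip the one line that still trips a warning. A rough sketch of the same pattern, using a shortened set of placeholder parameters rather than the full faroo query string:

```python
url = 'http://www.faroo.com/'

# Backslash continuation plus implicit literal concatenation: the pieces
# still collapse into a single template string at compile time.
search_url = url + 'api?{query}'\
    '&start={offset}'\
    '&f=json'  # noqa

assert search_url == 'http://www.faroo.com/api?{query}&start={offset}&f=json'
```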
searx/engines/google_images.py

@@ -9,7 +9,7 @@
 #  @stable      yes (but deprecated)
 #  @parse       url, title, img_src

-from urllib import urlencode, unquote
+from urllib import urlencode, unquote
 from json import loads

 # engine dependent config
searx/engines/kickass.py

 ## Kickass Torrent (Videos, Music, Files)
-#
+#
 # @website      https://kickass.so
 # @provide-api  no (nothing found)
-#
+#
 # @using-api    no
 # @results      HTML (using search portal)
 # @stable       yes (HTML can change)
@@ -13,7 +13,6 @@ from cgi import escape
 from urllib import quote
 from lxml import html
 from operator import itemgetter
-from dateutil import parser

 # engine dependent config
 categories = ['videos', 'music', 'files']
@@ -33,7 +32,8 @@ def request(query, params):
     params['url'] = search_url.format(search_term=quote(query),
                                       pageno=params['pageno'])

-    # FIX: SSLError: hostname 'kickass.so' doesn't match either of '*.kickass.to', 'kickass.to'
+    # FIX: SSLError: hostname 'kickass.so'
+    # doesn't match either of '*.kickass.to', 'kickass.to'
     params['verify'] = False

     return params
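For context, searx forwards `params['verify']` to the requests library, where `verify=False` disables TLS certificate (and therefore hostname) verification; that is what sidesteps the SSLError quoted in the comment. A minimal, illustrative sketch of the same option in plain requests (not the engine's actual request path):

```python
import requests

# verify=False tells requests to skip TLS certificate and hostname checks,
# so a certificate issued for '*.kickass.to' no longer aborts a request
# made to the 'kickass.so' hostname. Use with care: it removes TLS
# validation entirely.
session = requests.Session()
session.verify = False
```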
searx/engines/soundcloud.py

@@ -20,7 +20,12 @@ guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'
 # search-url
 url = 'https://api.soundcloud.com/'
-search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}&linked_partitioning=1&client_id={client_id}'
+search_url = url + 'search?{query}'\
+    '&facet=model'\
+    '&limit=20'\
+    '&offset={offset}'\
+    '&linked_partitioning=1'\
+    '&client_id={client_id}'  # noqa

 # do search-request
searx/engines/yahoo.py

@@ -20,7 +20,8 @@ paging = True
 language_support = True

 # search-url
-search_url = 'https://search.yahoo.com/search?{query}&b={offset}&fl=1&vl=lang_{lang}'
+base_url = 'https://search.yahoo.com/'
+search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'

 # specific xpath variables
 results_xpath = '//div[@class="res"]'
@@ -57,9 +58,9 @@ def request(query, params):
     else:
         language = params['language'].split('_')[0]

-    params['url'] = search_url.format(offset=offset,
-                                      query=urlencode({'p': query}),
-                                      lang=language)
+    params['url'] = base_url + search_url.format(offset=offset,
+                                                 query=urlencode({'p': query}),
+                                                 lang=language)

     # TODO required?
     params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\
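Splitting the constant into `base_url` plus a relative `search_url` keeps the final request URL identical; only the point of concatenation moves into `request()`. A small sketch, with `'p=searx'` standing in for the result of `urlencode({'p': query})`:

```python
base_url = 'https://search.yahoo.com/'
search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'

# Same URL the old single search_url constant would have produced.
url = base_url + search_url.format(query='p=searx', offset=1, lang='en')
assert url == 'https://search.yahoo.com/search?p=searx&b=1&fl=1&vl=lang_en'
```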