humanities / gargantext · Commits

Commit 5f610771, authored Aug 25, 2016 by c24b
MERGE OK [patch] romain-stable-patch + c24b-stable-patch
Parents: db1b31a2, e5d4e175
Changes: 5 changed files with 226 additions and 180 deletions (+226 / -180)
gargantext/util/parsers/ISTEX.py                  +3    -3
gargantext/util/toolchain/main.py                 +2    -1
gargantext/util/toolchain/ngrams_extraction.py    +62   -85
gargantext/util/toolchain/parsing.py              +145  -82
moissonneurs/istex.py                             +14   -9
gargantext/util/parsers/ISTEX.py (view file @ 5f610771)

...
@@ -27,7 +27,7 @@ class ISTexParser(Parser):
        }
        suma = 0
        print(len(json_docs))
        for json_doc in json_docs:
            hyperdata = {}
...
@@ -92,9 +92,9 @@ class ISTexParser(Parser):
            hyperdata["language_iso3"] = "eng"
            # (cf. api.istex.fr/document/?q=*&facet=language
            #  et tests langid sur les language=["unknown"])
        #just to be sure
        hyperdata = self.format_hyperdata_languages(hyperdata)
        if "publication_date" in hyperdata:
            RealDate = hyperdata["publication_date"]
        if "publication_date" in hyperdata:
...
gargantext/util/toolchain/main.py (view file @ 5f610771)

...
@@ -62,7 +62,8 @@ def parse_extract_indexhyperdata(corpus):
    # apply actions
    print('CORPUS #%d' % (corpus.id))
    parse(corpus)
    print('CORPUS #%d: parsed' % (corpus.id))
    docs = corpus.children("DOCUMENT").count()
    print('CORPUS #%d: parsed %d' % (corpus.id, docs))
    extract_ngrams(corpus)
    # Preparing Databse
...
gargantext/util/toolchain/ngrams_extraction.py (view file @ 5f610771)

...
@@ -47,99 +47,76 @@ def extract_ngrams(corpus, keys=DEFAULT_INDEX_FIELDS, do_subngrams = DEFAULT_IND
    resource = corpus.resources()[0]
    documents_count = 0
    source = get_resource(resource["type"])
    #load only the docs that have passed the parsing without error
    # preload available taggers for corpus languages
    docs = [doc for doc in corpus.children('DOCUMENT') if doc.id not in corpus.hyperdata["skipped_docs"]]
    tagger_bots = {}
    #load available taggers for source default langage
    skipped_languages = {}
    tagger_bots = {lang: load_tagger(lang)() for lang in corpus.hyperdata["languages"] if lang != "__skipped__"}
    #sort docs by lang?
    for lang in corpus.hyperdata['languages']:
        # for lang, tagger in tagger_bots.items():
        try:
            tagger_bots[lang] = load_tagger(lang)()
        except KeyError:
            skipped_languages[lang] = True
            print("WARNING skipping language:", lang)
    # the list of todo docs
    docs = [doc for doc in corpus.children('DOCUMENT') if doc.id not in corpus.hyperdata['skipped_docs']]
    # go for the loop
    for documents_count, document in enumerate(docs):
        language_iso2 = document.hyperdata.get('language_iso2')
        #print(language_iso2)
        # skip case if no tagger available
        if language_iso2 in source["default_languages"]:
        if language_iso2 in skipped_languages:
            #filtering out skipped_docs of parsing not necessary in here filtered out in docs???
            corpus.hyperdata['skipped_docs'][document.id] = True
            #if document.id not in corpus.skipped_docs:
            tagger = tagger_bots[language_iso2]
            #print(language_iso2)
            #>>> romain-stable-patch
            #to do verify if document has no KEYS to index
            for key in keys:
                try:
                    value = document.hyperdata[str(key)]
                    if not isinstance(value, str):
                        print("DBG wrong content in doc for key", key)
                        continue
                    # get ngrams
                    for ngram in tagger.extract(value):
                        tokens = tuple(normalize_forms(token[0]) for token in ngram)
                        if do_subngrams:
                            # ex tokens = ["very", "cool", "exemple"]
                            #    subterms = [['very', 'cool'],
                            #                ['very', 'cool', 'exemple'],
                            #                ['cool', 'exemple']]
                            subterms = subsequences(tokens)
                        else:
                            subterms = [tokens]
                        for seqterm in subterms:
                            ngram = ' '.join(seqterm)
                            if len(ngram) > 1:
                                # doc <=> ngram index
                                nodes_ngrams_count[(document.id, ngram)] += 1
                                # add fields : terms n
                                ngrams_data.add((ngram[:255], len(seqterm), ))
                except:
                    #value not in doc
                    pass
            # except AttributeError:
            #     print("ERROR NO language_iso2")
            #     document.status("NGRAMS", error="No lang detected skipped Ngrams")
            #     corpus.skipped_docs.append(document.id)
            # integrate ngrams and nodes-ngrams
            if len(nodes_ngrams_count) >= BATCH_NGRAMSEXTRACTION_SIZE:
                _integrate_associations(nodes_ngrams_count, ngrams_data, db, cursor)
                nodes_ngrams_count.clear()
                ngrams_data.clear()
            if documents_count % BATCH_NGRAMSEXTRACTION_SIZE == 0:
                corpus.status('Ngrams', progress=documents_count+1)
                corpus.save_hyperdata()
            document.hyperdata["error"] = "Error: unsupported language"
            session.add(corpus)
            document.save_hyperdata()
            session.commit()
            continue
        # NORMAL CASE
        tagger = tagger_bots[language_iso2]
        for key in keys:
            key = str(key)
            if key not in document.hyperdata:
                # print("DBG missing key in doc", key)
                # TODO test if document has no keys at all
                continue
            # get a text value
            value = document[key]
            if not isinstance(value, str):
                print("DBG wrong content in doc for key", key)
                continue
            try:
                # get ngrams
                ngrams = tagger.extract(value)
                for ngram in ngrams:
                    tokens = tuple(normalize_forms(token[0]) for token in ngram)
                    if do_subngrams:
                        # ex tokens = ["very", "cool", "exemple"]
                        #    subterms = [['very', 'cool'],...]
                        subterms = subsequences(tokens)
                    else:
                        subterms = [tokens]
                    for seqterm in subterms:
                        ngram = ' '.join(seqterm)
                        if len(ngram) > 1:
                            # doc <=> ngram index
                            nodes_ngrams_count[(document.id, ngram)] += 1
                            # add fields : terms n
                            ngrams_data.add((ngram[:255], len(seqterm), ))
            except Exception as e:
                print('NGRAMS EXTRACTION skipping doc %i because of unknown error:' % document.id, str(e))
                # TODO add info to document.hyperdata['error']
                pass
        # integrate ngrams and nodes-ngrams
        if len(nodes_ngrams_count) >= BATCH_NGRAMSEXTRACTION_SIZE:
            _integrate_associations(nodes_ngrams_count, ngrams_data, db, cursor)
            nodes_ngrams_count.clear()
            ngrams_data.clear()
        if documents_count % BATCH_NGRAMSEXTRACTION_SIZE == 0:
            corpus.status('Ngrams', progress=documents_count+1)
            corpus.save_hyperdata()
            session.add(corpus)
            session.commit()
    # integrate ngrams and nodes-ngrams (le reste)
    if len(nodes_ngrams_count) > 0:
        _integrate_associations(nodes_ngrams_count, ngrams_data, db, cursor)
        nodes_ngrams_count.clear()
        ngrams_data.clear()
    corpus.hyperdata['skipped_languages'] = skipped_languages
    corpus.status('Ngrams', progress=documents_count+1, complete=True)
    corpus.save_hyperdata()
    session.commit()
    corpus.status('Ngrams', progress=documents_count+1, complete=True)
    corpus.save_hyperdata()
    session.commit()
except Exception as error:
    corpus.status('Ngrams', error=error)
...
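The do_subngrams branch above relies on a subsequences() helper. As a point of reference, the following minimal sketch (not the repository's implementation, whose exact behaviour may differ) reproduces the expansion shown in the inline comment: every contiguous multi-token sub-sequence of the token tuple.

    # Sketch only: matches the example given in the diff's comment.
    # The real gargantext `subsequences` helper may be implemented differently.
    def subsequences(tokens):
        """Return every contiguous sub-sequence of `tokens` with at least two items."""
        subterms = []
        for start in range(len(tokens)):
            for end in range(start + 2, len(tokens) + 1):
                subterms.append(list(tokens[start:end]))
        return subterms

    print(subsequences(("very", "cool", "exemple")))
    # -> [['very', 'cool'], ['very', 'cool', 'exemple'], ['cool', 'exemple']]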
gargantext/util/toolchain/parsing.py (view file @ 5f610771)

...
@@ -6,37 +6,112 @@ from collections import defaultdict, Counter
from re import sub
from gargantext.util.languages import languages, detect_lang


def add_lang(languages, hyperdata, skipped_languages):
    '''utility to add lang information
        1. on language_iso2
        2. on other format language_%f
        3. on text from concatenation of DEFAULT_INDEX_FIELDS
    '''
    if "language_iso2" in hyperdata.keys():
        try:
            languages[hyperdata["language_iso2"]] += 1
            return languages, hyperdata, skipped_languages
        except KeyError:
            hyperdata["error"] = "Error: unsupported language %s" % hyperdata["language_iso2"]
            skipped_languages.append(hyperdata["language_iso2"])
            return languages, hyperdata, skipped_languages

    # this should be the responsability of the parserbot
    elif "language_iso3" in hyperdata.keys():
        #convert
        try:
            lang = languages[hyperdata["language_iso3"]].iso2
            try:
                corpus.languages[lang] += 1
                return languages, hyperdata, skipped_languages
            except KeyError:
                hyperdata["error"] = "Error: unsupported language %s" % lang
                skipped_languages.append(lang)
                return languages, hyperdata, skipped_languages
        except KeyError:
            print("LANG not referenced", (hyperdata["language_iso3"]))
            #skipped_languages.append(hyperdata["language_iso3"])
            #hyperdata["error"] = "Error: unsupported language '%s'" %hyperdata["language_fullname"]
            return languages, hyperdata, skipped_languages

    elif "language_fullname" in hyperdata.keys():
        try:
            #convert
            lang = languages[hyperdata["language_fullname"]].iso2
            try:
                corpus.languages[lang] += 1
                return corpus, hyperdata, skipped_languages
            except KeyError:
                hyperdata["error"] = "Error: unsupported language %s" % lang
                skipped_languages.append(lang)
                return languages, hyperdata, skipped_languages
        except KeyError:
            print("LANG Not referenced", (hyperdata["language_fullname"]))
            #hyperdata["error"] = "Error: unsupported language '%s'" %hyperdata["language_fullname"]
            return languages, hyperdata, skipped_languages

    else:
        print("[WARNING] no language_* found in document [parsing.py]")
        #no language have been indexed
        #detectlang by index_fields
        text = " ".join([getattr(hyperdata, k) for k in DEFAULT_INDEX_FIELDS])
        if len(text) < 10:
            hyperdata["error"] = "Error: no TEXT fields to index"
            skipped_languages.append("__unknown__")
            return languages, hyperdata, skipped_languages
        #detect_lang return iso2
        lang = detect_lang(text)
        try:
            languages[lang] += 1
            return languages, hyperdata, skipped_languages
        except KeyError:
            hyperdata["error"] = "Error: unsupported language '%s'" % lang
            skipped_languages.append(lang)
            return languages, hyperdata, skipped_languages


def parse(corpus):
    try:
        documents_count = 0
        print("PARSING")
        corpus.status('Docs', progress=0)
        # shortcut to hyperdata's list of added resources (packs of docs)
        #1 corpus => 1 or multi resources.path (for crawlers)
        resources = corpus.resources()
        if len(resources) == 0:
            return
        # vars to gather some infos during parsing (=> will end up in hyperdata)
        skipped_docs = defaultdict(bool)
        #all the resources are of the same type for now
        observed_languages = defaultdict(int)
        source = get_resource(resources[0]["type"])
        #get the sources capabilities for a given corpus resource
        # each resource contains a path to a file with the docs
        #load the corresponding parserbot
        for i, resource in enumerate(resources):
            if source["parser"] is None:
                #corpus.status(error)
                # we'll only want the resources that have never been extracted
                raise ValueError("Resource '%s' has no Parser" % resource["name"])
            if resource["extracted"]:
                parserbot = load_parser(source)
        #observed languages in default languages
        languages = defaultdict.fromkeys(source["default_languages"], 0)
        #skipped_languages
        skipped_languages = []
        #skipped docs to remember for later processing
        skipped_docs = []
        #BY RESOURCE
        for i, resource in enumerate(resources):
            if resource["extracted"] is True:
                continue
            # the sourcetype's infos
            source_infos = get_resource(resource['type'])
            if source_infos["parser"] is None:
                #corpus.status(error)
                raise ValueError("Resource '%s' has no Parser" % resource["name"])
            else:
                # load the corresponding parser
                # BY documents
                parserbot = load_parser(source_infos)
                d = 0
                # extract and insert documents from resource.path into database
                default_lang_field = ["language_" + l for l in ["iso2", "iso3", "full_name"]]
                for hyperdata in parserbot(resource["path"]):
                    # indexed text fields defined in CONSTANTS
                    for k in DEFAULT_INDEX_FIELDS:
...
@@ -45,80 +120,68 @@ def parse(corpus):
                            hyperdata[k] = normalize_chars(hyperdata[k])
                        except Exception as error:
                            hyperdata["error"] = "Error normalize_chars"
                    #else:
                    # any parserbot should implement a language_iso2
                    #print("[WARNING] No %s field found in hyperdata at parsing.py" %k)
                    if "language_iso2" in hyperdata.keys():
                        # continue
                        observed_languages[hyperdata["language_iso2"]] += 1
                    #adding lang into record hyperdata
                    languages, hyperdata, skipped_languages = add_lang(languages, hyperdata, skipped_languages)
                    # this should be the responsability of the parserbot
                    # elif "language_iso3" in hyperdata.keys():
                    #     try:
                    #         corpus.languages[languages(hyperdata["language_iso2"]).iso2] +=1
                    #     except KeyError:
                    #         hyperdata["error"] = "Error: unsupported language"
                    #         skipped_languages.append(hyperdata["language_iso2"])
                    else:
                        print("[WARNING] no language_iso2 found in document [parsing.py]")
                        # no language has been found by parserbot
                        # => detectlang on index_fields
                        text = " ".join([getattr(hyperdata, k, '') for k in DEFAULT_INDEX_FIELDS])
                        if len(text) < 10:
                            hyperdata["error"] = "Error: no TEXT fields to index"
                        else:
                            predicted_lang = detect_lang(text)
                            hyperdata["language_iso2"] = predicted_lang
                            observed_languages[predicted_lang] += 1

                    # save as DB child
                    # ----------------
                    #d += 1
                    #print ("INSERT", d)
                    document = corpus.add_child(
                        typename = 'DOCUMENT',
                        name = hyperdata.get('title', '')[:255],
                        hyperdata = hyperdata,
                    )
                    #corpus.save_hyperdata()
                    session.add(document)
                    session.commit()
                    if "error" in hyperdata.keys():
                        #document.status("error")
                        document.status('Parsing', error=document.hyperdata["error"])
                        document.save_hyperdata()
                        session.add(document)
                        session.commit()
                        #adding skipped_docs for later processsing
                        # adding to skipped_docs for later processing
                        skipped_docs.append(document.id)
                        skipped_docs[document.id] = True
                    #documents for this resources
                    session.add(corpus)
                    documents_count += 1
                    session.commit()
                    # update info about the resource
                    # logging
                    resource['extracted'] = True
                    if documents_count % BATCH_PARSING_SIZE == 0:
                        #print( "resource n°",i, ":", d, "docs inside this file")
                        corpus.status('Docs', progress=documents_count)
                        corpus.save_hyperdata()
                        session.add(corpus)
                        session.commit()
                #skipped_docs
                corpus.skipped_docs = list(set(skipped_docs))
                # update info about the resource
                print(len(corpus.skipped_docs), "docs skipped")
                corpus.hyperdata['resources'][i]['extracted'] = True
                skipped_langs = dict(Counter(skipped_languages))
                corpus.save_hyperdata()
                if len(corpus.skipped_docs) > 0:
                    session.commit()
                    print("in which:")
                    print(sum(skipped_langs.values()), "docs with unsupported lang")
                print("PARSING:", len(skipped_docs), "docs skipped")
                print(corpus.children("DOCUMENT").count(), "docs parsed")
                print("LANGUES")
                #languages INFO of corpus
                for n in observed_languages.items():
                    print(languages.items())
                    print(n)
                corpus.language_id = sorted(languages.items(), key=lambda x: x[1], reverse=True)[0][0]
                print("Default MAIN language of CORPUS", corpus.language_id)
                # add the infos to hyperdata at the end
                corpus.languages = dict(languages)
                corpus.hyperdata['skipped_docs'] = skipped_docs
                corpus.languages["__skipped__"] = list(skipped_langs.keys())
                corpus.hyperdata['languages'] = observed_languages
                print("Languages of CORPUS", corpus.languages)
                corpus.save_hyperdata()
        # commit all changes
        corpus.status('Docs', progress=documents_count, complete=True)
        corpus.save_hyperdata()
        session.add(corpus)
        session.commit()
        #TODO: assign main lang of the corpus to unsupported languages docs
        # for d_id in corpus.skipped_docs:
        #     document = session.query(Node).filter(Node.id == d_id, Node.typename == "DOCUMENT").first()
        #     if document.hyperdata["error"].startswith("Error: unsupported language"):
        #         print(document.hyperdata["language_iso2"])
        #         document.hyperdata["language_iso2"] = corpus.language_id
        #         document.save_hyperdata()
        #         session.commit()
    except Exception as error:
        corpus.status('Docs', error=error)
        corpus.save_hyperdata()
...
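For readers following the language bookkeeping in parse() above: the corpus main language is simply the language observed most often across parsed documents. A standalone toy illustration (not repository code, with made-up counts) of the sorted(...) selection line:

    from collections import defaultdict

    # Tally one count per parsed document (toy data, for illustration only).
    observed_languages = defaultdict(int)
    for doc_lang in ["en", "fr", "en", "en", "de"]:
        observed_languages[doc_lang] += 1

    # Same selection as in parse(): the highest count wins.
    language_id = sorted(observed_languages.items(), key=lambda x: x[1], reverse=True)[0][0]
    print(language_id)  # 'en' (seen 3 times)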
moissonneurs/istex.py (view file @ 5f610771)

...
@@ -8,7 +8,7 @@ from traceback import print_tb
from django.shortcuts import redirect, render
from django.http import Http404, HttpResponseRedirect, HttpResponseForbidden
from gargantext.constants import get_resource_by_name, QUERY_SIZE_N_MAX
from gargantext.constants import get_resource, QUERY_SIZE_N_MAX
from gargantext.models.nodes import Node
from gargantext.util.db import session
from gargantext.util.http import JsonHttpResponse
...
@@ -16,7 +16,7 @@ from gargantext.util.scheduling import scheduled
from gargantext.util.toolchain import parse_extract_indexhyperdata
from moissonneurs.util import Scraper

RESOURCE_TYPE_ISTEX = 8

def query(request):
...
@@ -85,7 +85,7 @@ def save(request , project_id):
    query = "-"
    query_string = "-"
    N = QUERY_SIZE_N_MAX
    # N = QUERY_SIZE_N_MAX
    if "query" in request.POST:
        query = request.POST["query"]
...
@@ -96,10 +96,12 @@ def save(request , project_id):
            N = QUERY_SIZE_N_MAX
        else:
            N = int(request.POST["N"]) # query_size from views_opti
            if N > QUERY_SIZE_N_MAX:
                msg = "Invalid sample size N = %i (max = %i)" % (N, QUERY_SIZE_N_MAX)
                N = QUERY_SIZE_N_MAX
                print("ERROR (scrap: istex d/l ): ", msg)
                #msg = "Invalid sample size N = %i (max = %i)" % (N, QUERY_SIZE_N_MAX)
                raise ValueError(msg)
                #print("ERROR (scrap: istex d/l ): ",msg)
                #raise ValueError(msg)
    print("Scrapping Istex: '%s' (%i)" % (query_string, N))
...
@@ -107,6 +109,7 @@ def save(request , project_id):
    pagesize = 50
    tasks = Scraper()
    chunks = list(tasks.chunks(range(N), pagesize))
    for k in chunks:
        if (k[0] + pagesize) > N:
            pagesize = N - k[0]
        urlreqs.append("http://api.istex.fr/document/?q=" + query_string + "&output=*&" + "from=" + str(k[0]) + "&size=" + str(pagesize))
...
@@ -131,6 +134,7 @@ def save(request , project_id):
    t = threading.Thread(target=tasks.worker2) #thing to do
    t.daemon = True  # thread dies when main thread (only non-daemon thread) exits.
    t.start()
    for url in urlreqs:
        tasks.q.put(url) #put a task in th queue
    tasks.q.join() # wait until everything is finished
...
@@ -140,21 +144,21 @@ def save(request , project_id):
        if filename != False:
            # add the uploaded resource to the corpus
            corpus.add_resource(
                  type = get_resource_by_name('ISTex')["type"]
                  type = get_resource(RESOURCE_TYPE_ISTEX)["type"]
                , path = filename
            )
            dwnldsOK += 1
    session.add(corpus)
    session.commit()
    corpus_id = corpus.id
    # corpus_id = corpus.id
    if dwnldsOK == 0:
        return JsonHttpResponse(["fail"])
    ###########################
    ###########################
    try:
        scheduled(parse_extract_indexhyperdata)(corpus_id)
        scheduled(parse_extract_indexhyperdata)(corpus.id)
    except Exception as error:
        print('WORKFLOW ERROR')
        print(error)
...
@@ -178,4 +182,5 @@ def save(request , project_id):
    data = [query_string, query, N]
    print(data)
    return JsonHttpResponse(data)
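The chunking in save() pages through the ISTEX API with from/size query parameters, shrinking the last page so that exactly N documents are requested. A standalone sketch of that arithmetic (N, pagesize and query_string below are assumed illustration values, not taken from the actual request):

    N = 120                          # assumed sample size
    pagesize = 50                    # same page size as in the view above
    query_string = "deep+learning"   # assumed, already URL-encoded query

    urlreqs = []
    for start in range(0, N, pagesize):
        size = min(pagesize, N - start)   # last page is shrunk, as in the view
        urlreqs.append("http://api.istex.fr/document/?q=" + query_string
                       + "&output=*&from=" + str(start) + "&size=" + str(size))

    print(urlreqs[-1])
    # http://api.istex.fr/document/?q=deep+learning&output=*&from=100&size=20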