humanities / gargantext · Commits · 4545940d

Commit 4545940d, authored 8 years ago by c24b
MERGE OK [patch] romain-stable-patch + c24b-stable-patch

Parents: 8c8896bd, d1cf7dc9
Showing 5 changed files with 226 additions and 180 deletions (+226 −180):

    gargantext/util/parsers/ISTEX.py                  +3    −3
    gargantext/util/toolchain/main.py                 +2    −1
    gargantext/util/toolchain/ngrams_extraction.py    +62   −85
    gargantext/util/toolchain/parsing.py              +145  −82
    moissonneurs/istex.py                             +14   −9
gargantext/util/parsers/ISTEX.py  (view file @ 4545940d)
@@ -27,7 +27,7 @@ class ISTexParser(Parser):
         }
         suma = 0
         print(len(json_docs))
         for json_doc in json_docs:
             hyperdata = {}
@@ -92,7 +92,7 @@ class ISTexParser(Parser):
             hyperdata["language_iso3"] = "eng"
             # (cf. api.istex.fr/document/?q=*&facet=language
             #  and langid tests on language=["unknown"])
             # just to be sure
             hyperdata = self.format_hyperdata_languages(hyperdata)
             if "publication_date" in hyperdata:
gargantext/util/toolchain/main.py  (view file @ 4545940d)
@@ -62,7 +62,8 @@ def parse_extract_indexhyperdata(corpus):
     # apply actions
     print('CORPUS #%d' % (corpus.id))
     parse(corpus)
-    print('CORPUS #%d: parsed' % (corpus.id))
+    docs = corpus.children("DOCUMENT").count()
+    print('CORPUS #%d: parsed %d' % (corpus.id, docs))
     extract_ngrams(corpus)
     # Preparing Databse
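With this change the progress log reports how many DOCUMENT children actually survived parsing (via a children("DOCUMENT").count() query) before extract_ngrams(corpus) runs, instead of a bare "parsed" message.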
gargantext/util/toolchain/ngrams_extraction.py  (view file @ 4545940d)
@@ -47,60 +47,38 @@ def extract_ngrams(corpus, keys=DEFAULT_INDEX_FIELDS, do_subngrams = DEFAULT_IND
     resource = corpus.resources()[0]
     documents_count = 0
     source = get_resource(resource["type"])
-    # preload available taggers for corpus languages
-    tagger_bots = {}
-    skipped_languages = {}
-    for lang in corpus.hyperdata['languages']:
-        try:
-            tagger_bots[lang] = load_tagger(lang)()
-        except KeyError:
-            skipped_languages[lang] = True
-            print("WARNING skipping language:", lang)
-    # the list of todo docs
-    docs = [doc for doc in corpus.children('DOCUMENT')
-            if doc.id not in corpus.hyperdata['skipped_docs']]
-    # go for the loop
+    #load only the docs that have passed the parsing without error
+    docs = [doc for doc in corpus.children('DOCUMENT')
+            if doc.id not in corpus.hyperdata["skipped_docs"]]
+    #load available taggers for source default langage
+    tagger_bots = {lang: load_tagger(lang)()
+                   for lang in corpus.hyperdata["languages"]
+                   if lang != "__skipped__"}
+    #sort docs by lang?
+    # for lang, tagger in tagger_bots.items():
     for documents_count, document in enumerate(docs):
         language_iso2 = document.hyperdata.get('language_iso2')
-        #print(language_iso2)
-        # skip case if no tagger available
-        if language_iso2 in skipped_languages:
-            corpus.hyperdata['skipped_docs'][document.id] = True
-            corpus.save_hyperdata()
-            document.hyperdata["error"] = "Error: unsupported language"
-            document.save_hyperdata()
-            session.commit()
-            continue
-        # NORMAL CASE
-        if language_iso2 in source["default_languages"]:
-            #filtering out skipped_docs of parsing not necessary in here filtered out in docs???
-            #if document.id not in corpus.skipped_docs:
         tagger = tagger_bots[language_iso2]
+        #print(language_iso2)
+        #>>> romain-stable-patch
+        #to do verify if document has no KEYS to index
         for key in keys:
             key = str(key)
             if key not in document.hyperdata:
                 # print("DBG missing key in doc", key)
                 # TODO test if document has no keys at all
                 continue
-            # get a text value
-            value = document[key]
-            try:
+            value = document.hyperdata[str(key)]
+            if not isinstance(value, str):
+                print("DBG wrong content in doc for key", key)
+                continue
+            try:
                 # get ngrams
-                ngrams = tagger.extract(value)
-                for ngram in ngrams:
+                for ngram in tagger.extract(value):
                     tokens = tuple(normalize_forms(token[0]) for token in ngram)
                     if do_subngrams:
                         # ex tokens = ["very", "cool", "exemple"]
-                        # subterms = [['very', 'cool'],...]
+                        # subterms = [['very', 'cool'],
+                        #             ['very', 'cool', 'exemple'],
+                        #             ['cool', 'exemple']]
                         subterms = subsequences(tokens)
                     else:
                         subterms = [tokens]
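In the merged version, language handling moves upstream: parsing now records unsupported languages under the "__skipped__" key and failed documents in skipped_docs, so extraction just filters both out and can assume a tagger exists for every remaining document. Inside the loop, the do_subngrams branch expands each extracted ngram into its contiguous sub-sequences via subsequences(). A minimal sketch consistent with the worked example in the diff comment (a hypothetical reimplementation for illustration, not the project's actual helper):

```python
def subsequences(tokens):
    # All contiguous sub-sequences of length >= 2, as in the diff comment;
    # assumption: single tokens are handled elsewhere.
    n = len(tokens)
    return [tokens[i:j] for i in range(n) for j in range(i + 2, n + 1)]

# Mirrors the example in the comment (tokens arrive as a tuple upstream):
assert subsequences(("very", "cool", "exemple")) == [
    ("very", "cool"),
    ("very", "cool", "exemple"),
    ("cool", "exemple"),
]
```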
@@ -112,11 +90,13 @@ def extract_ngrams(corpus, keys=DEFAULT_INDEX_FIELDS, do_subngrams = DEFAULT_IND
                         nodes_ngrams_count[(document.id, ngram)] += 1
                         # add fields : terms n
                         ngrams_data.add((ngram[:255], len(seqterm),))
-            except Exception as e:
-                print('NGRAMS EXTRACTION skipping doc %i because of unknown error:' % document.id, str(e))
-                # TODO add info to document.hyperdata['error']
+            except:
+                #value not in doc
+                pass
+        # except AttributeError:
+        #     print("ERROR NO language_iso2")
+        #     document.status("NGRAMS", error="No lang detected skipped Ngrams")
+        #     corpus.skipped_docs.append(document.id)
         # integrate ngrams and nodes-ngrams
         if len(nodes_ngrams_count) >= BATCH_NGRAMSEXTRACTION_SIZE:
             _integrate_associations(nodes_ngrams_count, ngrams_data, db, cursor)
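The BATCH_NGRAMSEXTRACTION_SIZE test implements an accumulate-then-flush pattern that bounds memory while batching database writes. A self-contained sketch of the same pattern (flush() is a stand-in for gargantext's _integrate_associations, which does the real SQL work):

```python
from collections import defaultdict

def flush(counts):
    # Stand-in for _integrate_associations(): bulk-write, then reset.
    print("flushing", len(counts), "(doc, ngram) pairs")
    counts.clear()

def count_pairs(pairs, batch_size=1000):
    nodes_ngrams_count = defaultdict(int)
    for doc_id, ngram in pairs:
        nodes_ngrams_count[(doc_id, ngram)] += 1
        if len(nodes_ngrams_count) >= batch_size:   # bound memory use
            flush(nodes_ngrams_count)
    flush(nodes_ngrams_count)                       # final partial batch

# Example: three pairs, flushed in batches of two distinct keys.
count_pairs([(1, "brain"), (1, "brain"), (2, "cortex")], batch_size=2)
```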
@@ -134,9 +114,6 @@ def extract_ngrams(corpus, keys=DEFAULT_INDEX_FIELDS, do_subngrams = DEFAULT_IND
             nodes_ngrams_count.clear()
             ngrams_data.clear()
-    corpus.hyperdata['skipped_languages'] = skipped_languages
-    corpus.save_hyperdata()
     corpus.status('Ngrams', progress=documents_count+1, complete=True)
     corpus.save_hyperdata()
     session.commit()
gargantext/util/toolchain/parsing.py  (view file @ 4545940d)

(diff not expanded here: +145 −82)
moissonneurs/istex.py  (view file @ 4545940d)
@@ -8,7 +8,7 @@ from traceback import print_tb
 from django.shortcuts import redirect, render
 from django.http import Http404, HttpResponseRedirect, HttpResponseForbidden
-from gargantext.constants import get_resource_by_name, QUERY_SIZE_N_MAX
+from gargantext.constants import get_resource, QUERY_SIZE_N_MAX
 from gargantext.models.nodes import Node
 from gargantext.util.db import session
 from gargantext.util.http import JsonHttpResponse
@@ -16,7 +16,7 @@ from gargantext.util.scheduling import scheduled
 from gargantext.util.toolchain import parse_extract_indexhyperdata
 from moissonneurs.util import Scraper
+RESOURCE_TYPE_ISTEX = 8

 def query(request):
@@ -85,7 +85,7 @@ def save(request , project_id):
     query = "-"
     query_string = "-"
-    N = QUERY_SIZE_N_MAX
+    # N = QUERY_SIZE_N_MAX
     if "query" in request.POST:
         query = request.POST["query"]
@@ -96,10 +96,12 @@ def save(request , project_id):
         N = QUERY_SIZE_N_MAX
     else:
         N = int(request.POST["N"])
     # query_size from views_opti
     if N > QUERY_SIZE_N_MAX:
-        msg = "Invalid sample size N = %i (max = %i)" % (N, QUERY_SIZE_N_MAX)
-        print("ERROR (scrap: istex d/l ): ", msg)
-        raise ValueError(msg)
+        N = QUERY_SIZE_N_MAX
+        #msg = "Invalid sample size N = %i (max = %i)" % (N, QUERY_SIZE_N_MAX)
+        #print("ERROR (scrap: istex d/l ): ",msg)
+        #raise ValueError(msg)

     print("Scrapping Istex: '%s' (%i)" % (query_string, N))
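The merged code thus clamps an oversized request to the maximum (equivalent to N = min(N, QUERY_SIZE_N_MAX)) instead of aborting the whole download with a ValueError; the old message, print and raise survive only as comments.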
@@ -107,6 +109,7 @@ def save(request , project_id):
     pagesize = 50
     tasks = Scraper()
     chunks = list(tasks.chunks(range(N), pagesize))
     for k in chunks:
         if (k[0] + pagesize) > N:
             pagesize = N - k[0]
         urlreqs.append("http://api.istex.fr/document/?q=" + query_string
                        + "&output=*&" + "from=" + str(k[0])
                        + "&size=" + str(pagesize))
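The chunking logic pages through ISTEX results pagesize documents at a time, shrinking the final page so that from + size never exceeds N. A standalone sketch of the same arithmetic (chunks() here is a stand-in for Scraper.chunks, assumed to split a range into fixed-size runs):

```python
def chunks(seq, size):
    # Stand-in for Scraper.chunks: split seq into runs of `size` items.
    return [seq[i:i + size] for i in range(0, len(seq), size)]

def build_urlreqs(query_string, N, pagesize=50):
    urlreqs = []
    for k in chunks(range(N), pagesize):
        if (k[0] + pagesize) > N:
            pagesize = N - k[0]              # shrink the last page
        urlreqs.append("http://api.istex.fr/document/?q=" + query_string
                       + "&output=*&" + "from=" + str(k[0])
                       + "&size=" + str(pagesize))
    return urlreqs

# N=120 yields from=0&size=50, from=50&size=50, from=100&size=20:
for url in build_urlreqs("brain", 120):
    print(url)
```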
@@ -131,6 +134,7 @@ def save(request , project_id):
     t = threading.Thread(target=tasks.worker2)  #thing to do
     t.daemon = True  # thread dies when main thread (only non-daemon thread) exits.
     t.start()
     for url in urlreqs:
         tasks.q.put(url)  #put a task in th queue
     tasks.q.join()  # wait until everything is finished
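The daemon thread plus queue is the standard producer/consumer idiom: worker2 consumes URLs until q.join() confirms every queued task was processed. A minimal self-contained version of the idiom (the body of worker2, which actually downloads each URL, is not shown in this hunk, so the worker below just prints):

```python
import queue
import threading

q = queue.Queue()

def worker():
    # Stand-in for tasks.worker2: consume URLs until the queue drains.
    while True:
        url = q.get()
        print("fetching", url)   # the real worker downloads here
        q.task_done()            # required for q.join() to unblock

t = threading.Thread(target=worker)
t.daemon = True                  # dies with the main thread, as in the diff
t.start()

for url in ["http://api.istex.fr/document/?q=a", "http://api.istex.fr/document/?q=b"]:
    q.put(url)
q.join()                         # block until every URL was marked done
```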
@@ -140,21 +144,21 @@ def save(request , project_id):
         if filename != False:
             # add the uploaded resource to the corpus
-            corpus.add_resource(type = get_resource_by_name('ISTex')["type"]
+            corpus.add_resource(type = get_resource(RESOURCE_TYPE_ISTEX)["type"]
                                 , path = filename)
             dwnldsOK += 1
     session.add(corpus)
     session.commit()
-    corpus_id = corpus.id
+    # corpus_id = corpus.id
     if dwnldsOK == 0:
         return JsonHttpResponse(["fail"])
     ###########################
     ###########################
     try:
-        scheduled(parse_extract_indexhyperdata)(corpus_id)
+        scheduled(parse_extract_indexhyperdata)(corpus.id)
     except Exception as error:
         print('WORKFLOW ERROR')
         print(error)
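Passing corpus.id directly is consistent with commenting out the corpus_id variable a few lines above; had the scheduled call kept corpus_id, it would now raise a NameError.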
@@ -178,4 +182,5 @@ def save(request , project_id):
     data = [query_string, query, N]
+    print(data)
     return JsonHttpResponse(data)