humanities / gargantext

Commit 5f610771, authored Aug 25, 2016 by c24b
MERGE OK [patch] romain-stable-patch + c24b-stable-patch
Parents: db1b31a2, e5d4e175
Showing 5 changed files with 226 additions and 180 deletions.
Changed files:

  gargantext/util/parsers/ISTEX.py (+3, -3)
  gargantext/util/toolchain/main.py (+2, -1)
  gargantext/util/toolchain/ngrams_extraction.py (+62, -85)
  gargantext/util/toolchain/parsing.py (+145, -82)
  moissonneurs/istex.py (+14, -9)
gargantext/util/parsers/ISTEX.py
```
@@ -27,7 +27,7 @@ class ISTexParser(Parser):
        }
        suma = 0
        print(len(json_docs))
        for json_doc in json_docs:
            hyperdata = {}
```
```
@@ -92,7 +92,7 @@ class ISTexParser(Parser):
            hyperdata["language_iso3"] = "eng"
            # (cf. api.istex.fr/document/?q=*&facet=language
            #  and langid tests on the language=["unknown"] documents)
        #just to be sure
        hyperdata = self.format_hyperdata_languages(hyperdata)
        if "publication_date" in hyperdata:
```
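This hunk pins documents whose language came back as "unknown" to "eng" before handing the record to `format_hyperdata_languages`, which fills in the other language fields. That helper lives elsewhere in the Parser base class; the following is only a hedged sketch of that kind of normalisation, with a hypothetical `ISO3_TABLE` standing in for whatever lookup the real method uses.

```python
# Hypothetical sketch of a format_hyperdata_languages-style normalisation:
# derive the missing language fields from the iso3 code that is present.
# ISO3_TABLE and the field names filled in here are assumptions, not the
# project's actual implementation.
ISO3_TABLE = {"eng": ("en", "english"), "fra": ("fr", "french")}

def format_hyperdata_languages_sketch(hyperdata):
    iso3 = hyperdata.get("language_iso3")
    if iso3 in ISO3_TABLE:
        iso2, name = ISO3_TABLE[iso3]
        hyperdata.setdefault("language_iso2", iso2)
        hyperdata.setdefault("language_name", name)
    return hyperdata

print(format_hyperdata_languages_sketch({"language_iso3": "eng"}))
# -> {'language_iso3': 'eng', 'language_iso2': 'en', 'language_name': 'english'}
```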
gargantext/util/toolchain/main.py
```
@@ -62,7 +62,8 @@ def parse_extract_indexhyperdata(corpus):
    # apply actions
    print('CORPUS #%d' % (corpus.id))
    parse(corpus)
    print('CORPUS #%d: parsed' % (corpus.id))
    docs = corpus.children("DOCUMENT").count()
    print('CORPUS #%d: parsed %d' % (corpus.id, docs))
    extract_ngrams(corpus)
    # Preparing Databse
```
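The only change here is that the "parsed" log line now also reports how many DOCUMENT children the parse step produced. A quick illustration of the new format string, with made-up values in place of corpus.id and the count:

```python
# Illustrative only: 42 and 1337 are made-up stand-ins for corpus.id and for
# the value of corpus.children("DOCUMENT").count().
corpus_id, docs = 42, 1337
print('CORPUS #%d: parsed %d' % (corpus_id, docs))  # -> CORPUS #42: parsed 1337
```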
gargantext/util/toolchain/ngrams_extraction.py
```
@@ -47,60 +47,38 @@ def extract_ngrams(corpus, keys=DEFAULT_INDEX_FIELDS, do_subngrams = DEFAULT_IND
    resource = corpus.resources()[0]
    documents_count = 0
    source = get_resource(resource["type"])
    # preload available taggers for corpus languages
    tagger_bots = {}
    skipped_languages = {}
    for lang in corpus.hyperdata['languages']:
        try:
            tagger_bots[lang] = load_tagger(lang)()
        except KeyError:
            skipped_languages[lang] = True
            print("WARNING skipping language:", lang)
    # the list of todo docs
    docs = [doc for doc in corpus.children('DOCUMENT') if doc.id not in corpus.hyperdata['skipped_docs']]
    # go for the loop
    #load only the docs that have passed the parsing without error
    docs = [doc for doc in corpus.children('DOCUMENT') if doc.id not in corpus.hyperdata["skipped_docs"]]
    #load available taggers for source default langage
    tagger_bots = {lang: load_tagger(lang)() for lang in corpus.hyperdata["languages"] if lang != "__skipped__"}
    #sort docs by lang?
    # for lang, tagger in tagger_bots.items():
    for documents_count, document in enumerate(docs):
        language_iso2 = document.hyperdata.get('language_iso2')
        #print(language_iso2)
        # skip case if no tagger available
        if language_iso2 in skipped_languages:
            corpus.hyperdata['skipped_docs'][document.id] = True
            corpus.save_hyperdata()
            document.hyperdata["error"] = "Error: unsupported language"
            document.save_hyperdata()
            session.commit()
            continue
        # NORMAL CASE
        if language_iso2 in source["default_languages"]:
            #filtering out skipped_docs of parsing not necessary in here filtered out in docs???
            #if document.id not in corpus.skipped_docs:
            tagger = tagger_bots[language_iso2]
        #print(language_iso2)
        #>>> romain-stable-patch
        #to do verify if document has no KEYS to index
        for key in keys:
            key = str(key)
            if key not in document.hyperdata:
                # print("DBG missing key in doc", key)
                # TODO test if document has no keys at all
                continue
            # get a text value
            value = document[key]
            try:
                value = document.hyperdata[str(key)]
                if not isinstance(value, str):
                    print("DBG wrong content in doc for key", key)
                    continue
                try:
                    # get ngrams
                    ngrams = tagger.extract(value)
                    for ngram in ngrams:
                    for ngram in tagger.extract(value):
                        tokens = tuple(normalize_forms(token[0]) for token in ngram)
                        if do_subngrams:
                            # ex tokens = ["very", "cool", "exemple"]
                            # subterms = [['very', 'cool'],...]
                            # subterms = [['very', 'cool'],
                            #             ['very', 'cool', 'exemple'],
                            #             ['cool', 'exemple']]
                            subterms = subsequences(tokens)
                        else:
                            subterms = [tokens]
```
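When `do_subngrams` is on, each extracted ngram is expanded into its sub-terms with `subsequences(tokens)`. That helper is not part of this diff; going by the inline example comment it enumerates contiguous sub-sequences, so the following is only a sketch under that assumption.

```python
# Minimal sketch of a subsequences()-like helper, assuming (from the example
# comment in the hunk above) that it yields every contiguous sub-sequence of
# at least two tokens. The real gargantext helper may behave differently.
def subsequences_sketch(tokens):
    n = len(tokens)
    return [tokens[i:j] for i in range(n) for j in range(i + 2, n + 1)]

print(subsequences_sketch(["very", "cool", "exemple"]))
# -> [['very', 'cool'], ['very', 'cool', 'exemple'], ['cool', 'exemple']]
```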
```
@@ -112,11 +90,13 @@ def extract_ngrams(corpus, keys=DEFAULT_INDEX_FIELDS, do_subngrams = DEFAULT_IND
                            nodes_ngrams_count[(document.id, ngram)] += 1
                            # add fields : terms n
                            ngrams_data.add((ngram[:255], len(seqterm), ))
                except Exception as e:
                    print('NGRAMS EXTRACTION skipping doc %i because of unknown error:' % document.id, str(e))
                    # TODO add info to document.hyperdata['error']
            except:
                #value not in doc
                pass
        # except AttributeError:
        #     print("ERROR NO language_iso2")
        #     document.status("NGRAMS", error="No lang detected skipped Ngrams")
        #     corpus.skipped_docs.append(document.id)
        # integrate ngrams and nodes-ngrams
        if len(nodes_ngrams_count) >= BATCH_NGRAMSEXTRACTION_SIZE:
            _integrate_associations(nodes_ngrams_count, ngrams_data, db, cursor)
```
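Rather than hitting the database once per ngram, the counts accumulate in `nodes_ngrams_count` and are flushed through `_integrate_associations` whenever the buffer reaches `BATCH_NGRAMSEXTRACTION_SIZE`. A generic, self-contained sketch of that batched-flush pattern (the flush function, batch size and sample data are placeholders, not the project's code):

```python
from collections import defaultdict

BATCH_SIZE = 1000  # placeholder for BATCH_NGRAMSEXTRACTION_SIZE

def flush(counts):
    # stand-in for _integrate_associations(): bulk-insert, then forget
    print("flushing", len(counts), "(doc, ngram) pairs")

counts = defaultdict(int)
for doc_id, ngram in [(1, "cool example"), (1, "example"), (2, "cool example")]:
    counts[(doc_id, ngram)] += 1
    if len(counts) >= BATCH_SIZE:
        flush(counts)
        counts.clear()
flush(counts)   # final partial batch
counts.clear()
```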
```
@@ -134,9 +114,6 @@ def extract_ngrams(corpus, keys=DEFAULT_INDEX_FIELDS, do_subngrams = DEFAULT_IND
            nodes_ngrams_count.clear()
            ngrams_data.clear()
    corpus.hyperdata['skipped_languages'] = skipped_languages
    corpus.save_hyperdata()
    corpus.status('Ngrams', progress=documents_count+1, complete=True)
    corpus.save_hyperdata()
    session.commit()
```
gargantext/util/toolchain/parsing.py
```
@@ -6,37 +6,112 @@ from collections import defaultdict, Counter
from re import sub
from gargantext.util.languages import languages, detect_lang

def parse(corpus):
def add_lang(languages, hyperdata, skipped_languages):
    '''utility to add lang information
        1. on language_iso2
        2. on other format language_%f
        3. on text from concatenation of DEFAULT_INDEX_FIELDS
    '''
    if "language_iso2" in hyperdata.keys():
        try:
            documents_count = 0
            corpus.status('Docs', progress=0)
            languages[hyperdata["language_iso2"]] += 1
            return languages, hyperdata, skipped_languages
        except KeyError:
            hyperdata["error"] = "Error: unsupported language %s" % hyperdata["language_iso2"]
            skipped_languages.append(hyperdata["language_iso2"])
            return languages, hyperdata, skipped_languages
    # this should be the responsability of the parserbot
    elif "language_iso3" in hyperdata.keys():
        #convert
        try:
            lang = languages[hyperdata["language_iso3"]].iso2
            try:
                corpus.languages[lang] += 1
                return languages, hyperdata, skipped_languages
            except KeyError:
                hyperdata["error"] = "Error: unsupported language %s" % lang
                skipped_languages.append(lang)
                return languages, hyperdata, skipped_languages
        except KeyError:
            print("LANG not referenced", (hyperdata["language_iso3"]))
            #skipped_languages.append(hyperdata["language_iso3"])
            #hyperdata["error"] = "Error: unsupported language '%s'" %hyperdata["language_fullname"]
            return languages, hyperdata, skipped_languages
    elif "language_fullname" in hyperdata.keys():
        # shortcut to hyperdata's list of added resources (packs of docs)
        resources = corpus.resources()
        try:
            #convert
            lang = languages[hyperdata["language_fullname"]].iso2
            try:
                corpus.languages[lang] += 1
                return corpus, hyperdata, skipped_languages
            except KeyError:
                hyperdata["error"] = "Error: unsupported language %s" % lang
                skipped_languages.append(lang)
                return languages, hyperdata, skipped_languages
        except KeyError:
            print("LANG Not referenced", (hyperdata["language_fullname"]))
            #hyperdata["error"] = "Error: unsupported language '%s'" %hyperdata["language_fullname"]
            return languages, hyperdata, skipped_languages
    # vars to gather some infos during parsing (=> will end up in hyperdata)
    skipped_docs = defaultdict(bool)
    observed_languages = defaultdict(int)
    # each resource contains a path to a file with the docs
    for i, resource in enumerate(resources):
    else:
        print("[WARNING] no language_* found in document [parsing.py]")
        #no language have been indexed
        #detectlang by index_fields
        # we'll only want the resources that have never been extracted
        if resource["extracted"]:
            continue
        text = " ".join([getattr(hyperdata, k) for k in DEFAULT_INDEX_FIELDS])
        if len(text) < 10:
            hyperdata["error"] = "Error: no TEXT fields to index"
            skipped_languages.append("__unknown__")
            return languages, hyperdata, skipped_languages
        #detect_lang return iso2
        lang = detect_lang(text)
        try:
            languages[lang] += 1
            return languages, hyperdata, skipped_languages
        except KeyError:
            hyperdata["error"] = "Error: unsupported language '%s'" % lang
            skipped_languages.append(lang)
            return languages, hyperdata, skipped_languages
        # the sourcetype's infos
        source_infos = get_resource(resource['type'])
        if source_infos["parser"] is None:

def parse(corpus):
    try:
        print("PARSING")
        corpus.status('Docs', progress=0)
        #1 corpus => 1 or multi resources.path (for crawlers)
        resources = corpus.resources()
        if len(resources) == 0:
            return
        #all the resources are of the same type for now
        source = get_resource(resources[0]["type"])
        #get the sources capabilities for a given corpus resource
        #load the corresponding parserbot
        if source["parser"] is None:
            #corpus.status(error)
            raise ValueError("Resource '%s' has no Parser" % resource["name"])
        else:
            # load the corresponding parser
            parserbot = load_parser(source_infos)
        # extract and insert documents from resource.path into database
        default_lang_field = ["language_" + l for l in ["iso2", "iso3", "full_name"]]
        parserbot = load_parser(source)
        #observed languages in default languages
        languages = defaultdict.fromkeys(source["default_languages"], 0)
        #skipped_languages
        skipped_languages = []
        #skipped docs to remember for later processing
        skipped_docs = []
        #BY RESOURCE
        for i, resource in enumerate(resources):
            if resource["extracted"] is True:
                continue
            else:
                # BY documents
                d = 0
                for hyperdata in parserbot(resource["path"]):
                    # indexed text fields defined in CONSTANTS
                    for k in DEFAULT_INDEX_FIELDS:
```
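The new `add_lang` helper gathers the language bookkeeping in one place: it tries `language_iso2` first, then `language_iso3`, then `language_fullname`, and finally falls back to `detect_lang` on the concatenated `DEFAULT_INDEX_FIELDS` text, appending anything unsupported to `skipped_languages`. A stripped-down sketch of that priority chain (the lookup tables and the stub detector are placeholders, not the project's code):

```python
# Sketch of the add_lang() fallback order; tables and detector are made up.
ISO3_TO_ISO2 = {"eng": "en", "fra": "fr"}
FULLNAME_TO_ISO2 = {"english": "en", "french": "fr"}
detect_lang_stub = lambda text: "en"

def resolve_lang(hyperdata, text=""):
    if "language_iso2" in hyperdata:
        return hyperdata["language_iso2"]
    if "language_iso3" in hyperdata:
        return ISO3_TO_ISO2.get(hyperdata["language_iso3"], "__skipped__")
    if "language_fullname" in hyperdata:
        return FULLNAME_TO_ISO2.get(hyperdata["language_fullname"], "__skipped__")
    if len(text) < 10:
        return "__unknown__"   # nothing worth running detection on
    return detect_lang_stub(text)

print(resolve_lang({"language_iso3": "fra"}))  # -> fr
```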
```
@@ -45,80 +120,68 @@ def parse(corpus):
                            hyperdata[k] = normalize_chars(hyperdata[k])
                        except Exception as error:
                            hyperdata["error"] = "Error normalize_chars"
                    # any parserbot should implement a language_iso2
                    if "language_iso2" in hyperdata.keys():
                        observed_languages[hyperdata["language_iso2"]] += 1
                    # this should be the responsability of the parserbot
                    # elif "language_iso3" in hyperdata.keys():
                    #     try:
                    #         corpus.languages[languages(hyperdata["language_iso2"]).iso2] +=1
                    #     except KeyError:
                    #         hyperdata["error"] = "Error: unsupported language"
                    #         skipped_languages.append(hyperdata["language_iso2"])
                    else:
                        print("[WARNING] no language_iso2 found in document [parsing.py]")
                        # no language has been found by parserbot
                        # => detectlang on index_fields
                        text = " ".join([getattr(hyperdata, k, '') for k in DEFAULT_INDEX_FIELDS])
                        if len(text) < 10:
                            hyperdata["error"] = "Error: no TEXT fields to index"
                        else:
                            predicted_lang = detect_lang(text)
                            hyperdata["language_iso2"] = predicted_lang
                            observed_languages[predicted_lang] += 1
                    #else:
                    #print("[WARNING] No %s field found in hyperdata at parsing.py" %k)
                    # continue
                    #adding lang into record hyperdata
                    languages, hyperdata, skipped_languages = add_lang(languages, hyperdata, skipped_languages)
                    # save as DB child
                    # ----------------
                    #d += 1
                    #print ("INSERT", d)
                    document = corpus.add_child(typename='DOCUMENT',
                                                name=hyperdata.get('title', '')[:255],
                                                hyperdata=hyperdata,
                                                )
                    #corpus.save_hyperdata()
                    session.add(document)
                    session.commit()
                    if "error" in hyperdata.keys():
                        #document.status("error")
                        document.status('Parsing', error=document.hyperdata["error"])
                        document.save_hyperdata()
                        session.add(document)
                        session.commit()
                        # adding to skipped_docs for later processing
                        skipped_docs[document.id] = True
                    documents_count += 1
                    # logging
                    if documents_count % BATCH_PARSING_SIZE == 0:
                        corpus.status('Docs', progress=documents_count)
                        corpus.save_hyperdata()
                        #adding skipped_docs for later processsing
                        skipped_docs.append(document.id)
                #documents for this resources
                session.add(corpus)
                session.commit()
            # update info about the resource
            corpus.hyperdata['resources'][i]['extracted'] = True
            corpus.save_hyperdata()
            session.commit()
            print("PARSING:", len(skipped_docs), "docs skipped")
            print("LANGUES")
            for n in observed_languages.items():
                print(n)
            # add the infos to hyperdata at the end
            corpus.hyperdata['skipped_docs'] = skipped_docs
            corpus.hyperdata['languages'] = observed_languages
            corpus.save_hyperdata()
            # commit all changes
            corpus.status('Docs', progress=documents_count, complete=True)
            resource['extracted'] = True
            #print( "resource n°",i, ":", d, "docs inside this file")
        #skipped_docs
        corpus.skipped_docs = list(set(skipped_docs))
        print(len(corpus.skipped_docs), "docs skipped")
        skipped_langs = dict(Counter(skipped_languages))
        if len(corpus.skipped_docs) > 0:
            print("in which:")
            print(sum(skipped_langs.values()), "docs with unsupported lang")
        print(corpus.children("DOCUMENT").count(), "docs parsed")
        #languages INFO of corpus
        print(languages.items())
        corpus.language_id = sorted(languages.items(), key=lambda x: x[1], reverse=True)[0][0]
        print("Default MAIN language of CORPUS", corpus.language_id)
        corpus.languages = dict(languages)
        corpus.languages["__skipped__"] = list(skipped_langs.keys())
        print("Languages of CORPUS", corpus.languages)
        corpus.save_hyperdata()
        session.add(corpus)
        session.commit()
        #TODO: assign main lang of the corpus to unsupported languages docs
        # for d_id in corpus.skipped_docs:
        #     document = session.query(Node).filter(Node.id == d_id, Node.typename == "DOCUMENT").first()
        #     if document.hyperdata["error"].startswith("Error: unsupported language"):
        #         print(document.hyperdata["language_iso2"])
        #         document.hyperdata["language_iso2"] = corpus.language_id
        #         document.save_hyperdata()
        #         session.commit()
    except Exception as error:
        corpus.status('Docs', error=error)
        corpus.save_hyperdata()
```
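At the end of `parse`, the per-language tally decides the main language of the corpus: the pairs are sorted by count and the top key becomes `corpus.language_id`. A tiny worked example of that selection, with made-up counts:

```python
# Made-up counts standing in for the corpus-level language tally.
languages = {"en": 120, "fr": 35, "de": 3}
main_lang = sorted(languages.items(), key=lambda x: x[1], reverse=True)[0][0]
print(main_lang)  # -> en
```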
moissonneurs/istex.py
```
@@ -8,7 +8,7 @@ from traceback import print_tb
from django.shortcuts import redirect, render
from django.http import Http404, HttpResponseRedirect, HttpResponseForbidden
from gargantext.constants import get_resource_by_name, QUERY_SIZE_N_MAX
from gargantext.constants import get_resource, QUERY_SIZE_N_MAX
from gargantext.models.nodes import Node
from gargantext.util.db import session
from gargantext.util.http import JsonHttpResponse
```
```
@@ -16,7 +16,7 @@ from gargantext.util.scheduling import scheduled
from gargantext.util.toolchain import parse_extract_indexhyperdata
from moissonneurs.util import Scraper

RESOURCE_TYPE_ISTEX = 8

def query(request):
```
```
@@ -85,7 +85,7 @@ def save(request , project_id):
    query = "-"
    query_string = "-"
    N = QUERY_SIZE_N_MAX
    # N = QUERY_SIZE_N_MAX
    if "query" in request.POST:
        query = request.POST["query"]
```
```
@@ -96,10 +96,12 @@ def save(request , project_id):
            N = QUERY_SIZE_N_MAX
        else:
            N = int(request.POST["N"])
    # query_size from views_opti
    if N > QUERY_SIZE_N_MAX:
        msg = "Invalid sample size N = %i (max = %i)" % (N, QUERY_SIZE_N_MAX)
        print("ERROR (scrap: istex d/l ): ", msg)
        raise ValueError(msg)
        N = QUERY_SIZE_N_MAX
        #msg = "Invalid sample size N = %i (max = %i)" % (N, QUERY_SIZE_N_MAX)
        #print("ERROR (scrap: istex d/l ): ",msg)
        #raise ValueError(msg)
    print("Scrapping Istex: '%s' (%i)" % (query_string, N))
```
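The behavioural change in this hunk: a sample size above QUERY_SIZE_N_MAX is now silently capped instead of raising ValueError. The same clamping can be written with min(); a sketch for comparison only, with a placeholder constant:

```python
QUERY_SIZE_N_MAX = 1000   # placeholder value for the real constant
requested_N = 5000        # e.g. what the form posted
N = min(requested_N, QUERY_SIZE_N_MAX)
print(N)                  # -> 1000: capped rather than rejected
```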
```
@@ -107,6 +109,7 @@ def save(request , project_id):
    pagesize = 50
    tasks = Scraper()
    chunks = list(tasks.chunks(range(N), pagesize))
    for k in chunks:
        if (k[0] + pagesize) > N:
            pagesize = N - k[0]
        urlreqs.append("http://api.istex.fr/document/?q=" + query_string + "&output=*&" + "from=" + str(k[0]) + "&size=" + str(pagesize))
```
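Each chunk becomes one ISTEX API request paged with from/size, and the last page is shrunk so the total never exceeds N. A self-contained sketch of the same paging arithmetic (plain range slicing stands in for Scraper.chunks, and the query string is a dummy):

```python
# Sketch of the from/size pagination, assuming Scraper.chunks() simply cuts
# range(N) into fixed-size pieces; "dummy" replaces the real query_string.
N, pagesize = 120, 50
query_string = "dummy"
urlreqs = []
for start in range(0, N, pagesize):
    size = min(pagesize, N - start)
    urlreqs.append("http://api.istex.fr/document/?q=" + query_string
                   + "&output=*&from=" + str(start) + "&size=" + str(size))
print(urlreqs[-1])
# -> http://api.istex.fr/document/?q=dummy&output=*&from=100&size=20
```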
```
@@ -131,6 +134,7 @@ def save(request , project_id):
        t = threading.Thread(target=tasks.worker2)  #thing to do
        t.daemon = True  # thread dies when main thread (only non-daemon thread) exits.
        t.start()
    for url in urlreqs:
        tasks.q.put(url)  #put a task in th queue
    tasks.q.join()  # wait until everything is finished
```
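The downloads run through a small producer/consumer setup: daemon worker threads pull URLs off a queue and q.join() blocks until every queued URL has been handled. A minimal stdlib sketch of that pattern (the worker body and thread count are illustrative, not the Scraper internals):

```python
import threading, queue

q = queue.Queue()

def worker():
    while True:
        url = q.get()
        print("fetching", url)   # a real worker would download here
        q.task_done()

for _ in range(4):               # illustrative thread count
    t = threading.Thread(target=worker)
    t.daemon = True              # dies when the main thread exits
    t.start()

for url in ["http://example.org/1", "http://example.org/2"]:
    q.put(url)
q.join()                         # wait until everything is finished
```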
```
@@ -140,21 +144,21 @@ def save(request , project_id):
        if filename != False:
            # add the uploaded resource to the corpus
            corpus.add_resource(
                type = get_resource_by_name('ISTex')["type"]
                type = get_resource(RESOURCE_TYPE_ISTEX)["type"]
                , path = filename
            )
            dwnldsOK += 1
    session.add(corpus)
    session.commit()
    corpus_id = corpus.id
    # corpus_id = corpus.id
    if dwnldsOK == 0:
        return JsonHttpResponse(["fail"])
    ###########################
    ###########################
    try:
        scheduled(parse_extract_indexhyperdata)(corpus_id)
        scheduled(parse_extract_indexhyperdata)(corpus.id)
    except Exception as error:
        print('WORKFLOW ERROR')
        print(error)
```
```
@@ -178,4 +182,5 @@ def save(request , project_id):
    data = [query_string, query, N]
    print(data)
    return JsonHttpResponse(data)
```