humanities / gargantext · Commit e5d4e175, authored Aug 24, 2016 by c24b
FIX TAGERBOT default_lang

parent dbb66340
Showing 2 changed files with 72 additions and 68 deletions.
gargantext/util/toolchain/ngrams_extraction.py: +56 -52
gargantext/util/toolchain/parsing.py: +16 -16
gargantext/util/toolchain/ngrams_extraction.py
...
...
@@ -52,62 +52,66 @@ def extract_ngrams(corpus, keys=DEFAULT_INDEX_FIELDS, do_subngrams = DEFAULT_IND
         tagger_bots = {lang: load_tagger(lang)() for lang in corpus.languages if lang != "__skipped__"}
         #sort docs by lang?
         # for lang, tagger in tagger_bots.items():
         for documents_count, document in enumerate(docs):
             language_iso2 = document.hyperdata.get('language_iso2')
-            tagger = tagger_bots[language_iso2]
-            #print(language_iso2)
-            for key in keys:
-                try:
-                    value = document[str(key)]
-                    if not isinstance(value, str):
-                        continue
-                    # get ngrams
-                    for ngram in tagger.extract(value):
-                        tokens = tuple(normalize_forms(token[0]) for token in ngram)
-                        if do_subngrams:
-                            # ex tokens = ["very", "cool", "exemple"]
-                            #    subterms = [['very', 'cool'],
-                            #                ['very', 'cool', 'exemple'],
-                            #                ['cool', 'exemple']]
-                            subterms = subsequences(tokens)
-                        else:
-                            subterms = [tokens]
-                        for seqterm in subterms:
-                            ngram = ' '.join(seqterm)
-                            if len(ngram) > 1:
-                                # doc <=> ngram index
-                                nodes_ngrams_count[(document.id, ngram)] += 1
-                                # add fields : terms n
-                                ngrams_data.add((ngram[:255], len(seqterm), ))
-                except:
-                    #value not in doc
-                    pass
-            # except AttributeError:
-            #     print("ERROR NO language_iso2")
-            #     document.status("NGRAMS", error="No lang detected skipped Ngrams")
-            #     corpus.skipped_docs.append(document.id)
-            # integrate ngrams and nodes-ngrams
-            if len(nodes_ngrams_count) >= BATCH_NGRAMSEXTRACTION_SIZE:
-                _integrate_associations(nodes_ngrams_count, ngrams_data, db, cursor)
-                nodes_ngrams_count.clear()
-                ngrams_data.clear()
-            if documents_count % BATCH_NGRAMSEXTRACTION_SIZE == 0:
-                corpus.status('Ngrams', progress=documents_count+1)
+            if language_iso2 in source["default_languages"]:
+                #filtering out skipped_docs of parsing
+                #if document.id not in corpus.skipped_docs:
+                tagger = tagger_bots[language_iso2]
+                #print(language_iso2)
+                for key in keys:
+                    try:
+                        value = document[str(key)]
+                        if not isinstance(value, str):
+                            continue
+                        # get ngrams
+                        for ngram in tagger.extract(value):
+                            tokens = tuple(normalize_forms(token[0]) for token in ngram)
+                            if do_subngrams:
+                                # ex tokens = ["very", "cool", "exemple"]
+                                #    subterms = [['very', 'cool'],
+                                #                ['very', 'cool', 'exemple'],
+                                #                ['cool', 'exemple']]
+                                subterms = subsequences(tokens)
+                            else:
+                                subterms = [tokens]
+                            for seqterm in subterms:
+                                ngram = ' '.join(seqterm)
+                                if len(ngram) > 1:
+                                    # doc <=> ngram index
+                                    nodes_ngrams_count[(document.id, ngram)] += 1
+                                    # add fields : terms n
+                                    ngrams_data.add((ngram[:255], len(seqterm), ))
+                    except:
+                        #value not in doc
+                        pass
+                # except AttributeError:
+                #     print("ERROR NO language_iso2")
+                #     document.status("NGRAMS", error="No lang detected skipped Ngrams")
+                #     corpus.skipped_docs.append(document.id)
+                # integrate ngrams and nodes-ngrams
+                if len(nodes_ngrams_count) >= BATCH_NGRAMSEXTRACTION_SIZE:
+                    _integrate_associations(nodes_ngrams_count, ngrams_data, db, cursor)
+                    nodes_ngrams_count.clear()
+                    ngrams_data.clear()
+                if documents_count % BATCH_NGRAMSEXTRACTION_SIZE == 0:
+                    corpus.status('Ngrams', progress=documents_count+1)
-        corpus.save_hyperdata()
-        session.add(corpus)
-        session.commit()
-        # integrate ngrams and nodes-ngrams (le reste)
-        if len(nodes_ngrams_count) > 0:
-            _integrate_associations(nodes_ngrams_count, ngrams_data, db, cursor)
-            nodes_ngrams_count.clear()
-            ngrams_data.clear()
-        corpus.status('Ngrams', progress=documents_count+1, complete=True)
-        corpus.save_hyperdata()
-        session.add(corpus)
-        session.commit()
+        # integrate ngrams and nodes-ngrams (le reste)
+        if len(nodes_ngrams_count) > 0:
+            _integrate_associations(nodes_ngrams_count, ngrams_data, db, cursor)
+            nodes_ngrams_count.clear()
+            ngrams_data.clear()
+        corpus.status('Ngrams', progress=documents_count+1, complete=True)
+        corpus.save_hyperdata()
+        session.commit()
     except Exception as error:
         corpus.status('Ngrams', error=error)
         corpus.save_hyperdata()
...
...
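For readers skimming the diff: the subsequences(tokens) call above is what expands each extracted ngram into its sub-ngrams when do_subngrams is set. The helper itself is not part of this commit; what follows is a minimal hypothetical sketch, assuming (per the inline comment's example) that it returns every contiguous run of at least two tokens:

    def subsequences(tokens):
        # Hypothetical sketch, not the project's actual helper: yield every
        # contiguous run of two or more tokens, matching the diff's comment.
        n = len(tokens)
        return [list(tokens[i:j]) for i in range(n) for j in range(i + 2, n + 1)]

    # subsequences(("very", "cool", "exemple"))
    # -> [['very', 'cool'], ['very', 'cool', 'exemple'], ['cool', 'exemple']]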
gargantext/util/toolchain/parsing.py
...
...
@@ -146,14 +146,7 @@ def parse(corpus):
                     session.commit()
                     #adding skipped_docs for later processsing
                     skipped_docs.append(document.id)
             #documents for this resources
             session.add(corpus)
             session.commit()
         # update info about the resource
...
...
@@ -161,20 +154,27 @@ def parse(corpus):
         #print( "resource n°",i, ":", d, "docs inside this file")
     # add a corpus-level info about languages adding a __skipped__ info
-    print(len(skipped_docs), "docs skipped")
+    #skipped_docs
+    corpus.skipped_docs = list(set(skipped_docs))
+    print(len(corpus.skipped_docs), "docs skipped")
+    skipped_langs = dict(Counter(skipped_languages))
+    if len(corpus.skipped_docs) > 0:
+        print("INFO in which:")
+        print(sum(skipped_langs.values()), "docs with unsupported lang")
     print(corpus.children("DOCUMENT").count(), "docs parsed")
-    # main language of the corpus
+    # language of corpus
-    print(languages.items())
     corpus.language_id = sorted(languages.items(), key=lambda x: x[1], reverse=True)[0][0]
-    print(corpus.language_id)
-    languages['__skipped__'] = dict(Counter(skipped_languages))
-    corpus.languages = languages
-    corpus.skipped_docs = list(set(skipped_docs))
+    print("Default MAIN language of CORPUS", corpus.language_id)
+    corpus.languages = dict(languages)
+    corpus.languages["__skipped__"] = list(skipped_langs.keys())
+    print("Languages of CORPUS", corpus.languages)
     corpus.save_hyperdata()
     session.commit()
-    if len(corpus.skipped_docs) > 0:
-        print(sum(languages["__skipped__"].values()), "docs with unsupported lang")
     #assign main lang of the corpus to unsupported languages docs
     # for d_id in corpus.skipped_docs:
    #     document = session.query(Node).filter(Node.id == d_id, Node.typename == "DOCUMENT").first()
...
...
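Taken together, the two files now agree on where skipped languages live: parse() stores their codes under the "__skipped__" key of corpus.languages, and extract_ngrams() filters that key out when building one tagger per remaining language. A minimal sketch of that contract, with stand-in data and a stub load_tagger (both hypothetical, only the comprehension mirrors the diff):

    from collections import Counter

    # stand-in values for what parse() computes (hypothetical data)
    languages = {"en": 12, "fr": 7}          # docs counted per detected language
    skipped_languages = ["zz", "zz", "xx"]   # unsupported codes, one per skipped doc
    skipped_langs = dict(Counter(skipped_languages))

    corpus_languages = dict(languages)
    corpus_languages["__skipped__"] = list(skipped_langs.keys())

    def load_tagger(lang):
        # stub: the real load_tagger returns a tagger class for `lang`
        return lambda: "tagger for %s" % lang

    # the comprehension from extract_ngrams: one tagger per supported language,
    # with the "__skipped__" bookkeeping key filtered out
    tagger_bots = {lang: load_tagger(lang)()
                   for lang in corpus_languages if lang != "__skipped__"}
    print(tagger_bots)  # {'en': 'tagger for en', 'fr': 'tagger for fr'}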