humanities / gargantext
Commit e5d4e175
authored Aug 24, 2016 by c24b
FIX TAGERBOT default_lang
parent dbb66340
Showing 2 changed files with 72 additions and 68 deletions (+72 -68)

gargantext/util/toolchain/ngrams_extraction.py  +56 -52
gargantext/util/toolchain/parsing.py  +16 -16
gargantext/util/toolchain/ngrams_extraction.py
@@ -52,8 +52,12 @@ def extract_ngrams(corpus, keys=DEFAULT_INDEX_FIELDS, do_subngrams = DEFAULT_IND
    tagger_bots = {lang: load_tagger(lang)() for lang in corpus.languages if lang != "__skipped__"}
    #sort docs by lang?
    # for lang, tagger in tagger_bots.items():
    for documents_count, document in enumerate(docs):
        language_iso2 = document.hyperdata.get('language_iso2')
        if language_iso2 in source["default_languages"]:
            #filtering out skipped_docsof parsing
            #if document.id not in corpus.skipped_docs:
            tagger = tagger_bots[language_iso2]
            #print(language_iso2)
            for key in keys:
                ...
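The hunk above builds one tagger per language present in the corpus and, after this fix, only tags documents whose language_iso2 appears in the source's default_languages. Below is a minimal, self-contained sketch of that dispatch pattern; FakeTagger, SUPPORTED and docs are illustrative stand-ins for gargantext's load_tagger(), source["default_languages"] and the document nodes, not the project's actual API.

    from collections import defaultdict

    class FakeTagger:
        """Stand-in for a real POS tagger such as the ones load_tagger() returns."""
        def __init__(self, lang):
            self.lang = lang
        def tag_text(self, text):
            # A real tagger would return meaningful (token, POS) pairs; here we only split.
            return [(token, "UNK") for token in text.split()]

    SUPPORTED = {"en", "fr"}   # plays the role of source["default_languages"]
    docs = [
        {"language_iso2": "en", "abstract": "a short english abstract"},
        {"language_iso2": "fr", "abstract": "un court résumé en français"},
        {"language_iso2": "xx", "abstract": "unsupported language, will be skipped"},
    ]

    # one tagger per supported language seen in the corpus
    corpus_languages = {d["language_iso2"] for d in docs}
    tagger_bots = {lang: FakeTagger(lang) for lang in corpus_languages if lang in SUPPORTED}

    tagged = defaultdict(list)
    for documents_count, document in enumerate(docs):
        language_iso2 = document.get("language_iso2")
        if language_iso2 not in tagger_bots:   # mirrors the new default_languages guard
            continue
        tagged[language_iso2].extend(tagger_bots[language_iso2].tag_text(document["abstract"]))

    print({lang: len(tokens) for lang, tokens in tagged.items()})

Documents in an unsupported language are simply skipped here, which corresponds to the __skipped__ bookkeeping that parsing.py maintains below.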
gargantext/util/toolchain/parsing.py
@@ -146,14 +146,7 @@ def parse(corpus):
            session.commit()
            #adding skipped_docs for later processsing
            skipped_docs.append(document.id)
        #documents for this resources
        session.add(corpus)
        session.commit()
        # update info about the resource
        ...
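This first hunk keeps the ids of documents that could not be handled (skipped_docs) so they can be processed after the main loop, and commits the corpus once the documents of a resource have been added. A rough sketch of that bookkeeping, assuming a hypothetical parse_documents() helper in place of the real parse(corpus) logic and plain tuples in place of document nodes:

    def parse_documents(raw_docs, supported_langs):
        skipped_docs = []        # ids of documents we could not handle, kept for later processing
        skipped_languages = []   # their language codes, for per-language statistics
        parsed = []
        for doc_id, (lang, text) in enumerate(raw_docs):
            if lang not in supported_langs:
                skipped_docs.append(doc_id)   # same role as skipped_docs.append(document.id)
                skipped_languages.append(lang)
                continue
            parsed.append((doc_id, text))
        # deduplicate before storing, like list(set(skipped_docs)) in the diff
        return parsed, sorted(set(skipped_docs)), skipped_languages

    parsed, skipped, skipped_langs = parse_documents(
        [("en", "ok"), ("xx", "unknown lang"), ("xx", "also unknown")],
        supported_langs={"en", "fr"},
    )
    print(len(skipped), "docs skipped")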
@@ -161,20 +154,27 @@ def parse(corpus):
        #print( "resource n°",i, ":", d, "docs inside this file")
        # add a corpus-level info about languages adding a __skipped__ info
        print(len(skipped_docs), "docs skipped")
        #skipped_docs
        corpus.skipped_docs = list(set(skipped_docs))
        print(len(corpus.skipped_docs), "docs skipped")
        skipped_langs = dict(Counter(skipped_languages))
        if len(corpus.skipped_docs) > 0:
            print("INFO in which:")
            print(sum(skipped_langs.values()), "docs with unsupported lang")
        print(corpus.children("DOCUMENT").count(), "docs parsed")
        # main language of the corpus
        # language of corpus
        print(languages.items())
        corpus.language_id = sorted(languages.items(), key=lambda x: x[1], reverse=True)[0][0]
        print(corpus.language_id)
        print("Default MAIN language of CORPUS", corpus.language_id)
        languages['__skipped__'] = dict(Counter(skipped_languages))
        corpus.languages = dict(languages)
        corpus.languages = languages
        corpus.languages["__skipped__"] = list(skipped_langs.keys())
        corpus.skipped_docs = list(set(skipped_docs))
        print("Languages of CORPUS", corpus.languages)
        corpus.save_hyperdata()
        session.commit()
        if len(corpus.skipped_docs) > 0:
            print(sum(languages["__skipped__"].values()), "docs with unsupported lang")
        #assign main lang of the corpus to unsupported languages docs
        # for d_id in corpus.skipped_docs:
        #     document = session.query(Node).filter(Node.id == d_id, Node.typename == "DOCUMENT").first()
        ...
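This second hunk derives the corpus-level language information: the most frequent language becomes corpus.language_id, and the languages that had to be skipped are recorded under a "__skipped__" key built from a Counter of skipped_languages. A small sketch of that selection logic, using only the standard library and a plain dict in place of the corpus node's hyperdata:

    from collections import Counter

    languages = {"en": 120, "fr": 40}       # per-language document counts seen while parsing
    skipped_languages = ["xx", "xx", "zz"]  # codes of languages that had no tagger

    skipped_langs = dict(Counter(skipped_languages))
    corpus = {}                              # stands in for the corpus node's attributes
    # most frequent language wins, as in the sorted(..., reverse=True)[0][0] line above
    corpus["language_id"] = sorted(languages.items(), key=lambda x: x[1], reverse=True)[0][0]
    corpus["languages"] = dict(languages)
    corpus["languages"]["__skipped__"] = list(skipped_langs.keys())

    print("Default MAIN language of CORPUS", corpus["language_id"])   # -> en
    print(sum(skipped_langs.values()), "docs with unsupported lang")  # -> 3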