Commit e5d4e175 authored by c24b

FIX TAGGERBOT default_lang

parent dbb66340
@@ -52,62 +52,66 @@ def extract_ngrams(corpus, keys=DEFAULT_INDEX_FIELDS, do_subngrams = DEFAULT_IND
    tagger_bots = {lang: load_tagger(lang)() for lang in corpus.languages if lang != "__skipped__"}
    # sort docs by lang?
    # for lang, tagger in tagger_bots.items():
    for documents_count, document in enumerate(docs):
        language_iso2 = document.hyperdata.get('language_iso2')
        tagger = tagger_bots[language_iso2]
        #print(language_iso2)
        for key in keys:
            try:
                value = document[str(key)]
                if not isinstance(value, str):
                    continue
                # get ngrams
                for ngram in tagger.extract(value):
                    tokens = tuple(normalize_forms(token[0]) for token in ngram)
                    if do_subngrams:
                        # e.g. tokens = ["very", "cool", "example"]
                        # subterms = [['very', 'cool'],
                        #             ['very', 'cool', 'example'],
                        #             ['cool', 'example']]
                        subterms = subsequences(tokens)
                    else:
                        subterms = [tokens]
                    for seqterm in subterms:
                        ngram = ' '.join(seqterm)
                        if len(ngram) > 1:
                            # doc <=> ngram index
                            nodes_ngrams_count[(document.id, ngram)] += 1
                            # add fields: terms, n
                            ngrams_data.add((ngram[:255], len(seqterm), ))
            except:
                # value not in doc; skip this key
                pass
            # except AttributeError:
            #     print("ERROR NO language_iso2")
            #     document.status("NGRAMS", error="No lang detected skipped Ngrams")
            #     corpus.skipped_docs.append(document.id)
        # integrate ngrams and nodes-ngrams
        if len(nodes_ngrams_count) >= BATCH_NGRAMSEXTRACTION_SIZE:
            _integrate_associations(nodes_ngrams_count, ngrams_data, db, cursor)
            nodes_ngrams_count.clear()
            ngrams_data.clear()
        if documents_count % BATCH_NGRAMSEXTRACTION_SIZE == 0:
            corpus.status('Ngrams', progress=documents_count+1)
        if language_iso2 in source["default_languages"]:
            # filtering out docs skipped during parsing
            #if document.id not in corpus.skipped_docs:
            tagger = tagger_bots[language_iso2]
            #print(language_iso2)
            for key in keys:
                try:
                    value = document[str(key)]
                    if not isinstance(value, str):
                        continue
                    # get ngrams
                    for ngram in tagger.extract(value):
                        tokens = tuple(normalize_forms(token[0]) for token in ngram)
                        if do_subngrams:
                            # e.g. tokens = ["very", "cool", "example"]
                            # subterms = [['very', 'cool'],
                            #             ['very', 'cool', 'example'],
                            #             ['cool', 'example']]
                            subterms = subsequences(tokens)
                        else:
                            subterms = [tokens]
                        for seqterm in subterms:
                            ngram = ' '.join(seqterm)
                            if len(ngram) > 1:
                                # doc <=> ngram index
                                nodes_ngrams_count[(document.id, ngram)] += 1
                                # add fields: terms, n
                                ngrams_data.add((ngram[:255], len(seqterm), ))
                except:
                    # value not in doc; skip this key
                    pass
                # except AttributeError:
                #     print("ERROR NO language_iso2")
                #     document.status("NGRAMS", error="No lang detected skipped Ngrams")
                #     corpus.skipped_docs.append(document.id)
            # integrate ngrams and nodes-ngrams
            if len(nodes_ngrams_count) >= BATCH_NGRAMSEXTRACTION_SIZE:
                _integrate_associations(nodes_ngrams_count, ngrams_data, db, cursor)
                nodes_ngrams_count.clear()
                ngrams_data.clear()
            if documents_count % BATCH_NGRAMSEXTRACTION_SIZE == 0:
                corpus.status('Ngrams', progress=documents_count+1)
                corpus.save_hyperdata()
                session.add(corpus)
                session.commit()
    # integrate ngrams and nodes-ngrams (the rest)
    if len(nodes_ngrams_count) > 0:
        _integrate_associations(nodes_ngrams_count, ngrams_data, db, cursor)
        nodes_ngrams_count.clear()
        ngrams_data.clear()
    corpus.status('Ngrams', progress=documents_count+1, complete=True)
    corpus.save_hyperdata()
    session.add(corpus)
    session.commit()
    # integrate ngrams and nodes-ngrams (the rest)
    if len(nodes_ngrams_count) > 0:
        _integrate_associations(nodes_ngrams_count, ngrams_data, db, cursor)
        nodes_ngrams_count.clear()
        ngrams_data.clear()
    corpus.status('Ngrams', progress=documents_count+1, complete=True)
    corpus.save_hyperdata()
    session.commit()
except Exception as error:
    corpus.status('Ngrams', error=error)
    corpus.save_hyperdata()
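The do_subngrams branch above depends on a subsequences helper; the inline comments show the output it is expected to produce. The following is a plausible sketch consistent with that example, not necessarily the implementation used in the codebase:

    def subsequences(tokens):
        # All contiguous sub-sequences of at least two tokens, e.g.
        # ("very", "cool", "example") ->
        # [("very", "cool"), ("very", "cool", "example"), ("cool", "example")]
        n = len(tokens)
        return [tokens[i:j] for i in range(n) for j in range(i + 2, n + 1)]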
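The fix in this hunk guards tagging with the resource's supported languages, so tagger_bots[language_iso2] is no longer reached for a language that has no tagger. A minimal sketch of that guard, assuming, as in the diff, that source["default_languages"] lists the supported iso2 codes; pick_tagger is a hypothetical helper used only for illustration:

    def pick_tagger(document, tagger_bots, source):
        # Return the tagger for the document's language, or None when the
        # language is unsupported (the caller then skips ngram extraction).
        language_iso2 = document.hyperdata.get('language_iso2')
        if language_iso2 in source["default_languages"]:
            return tagger_bots[language_iso2]
        return None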
@@ -146,14 +146,7 @@ def parse(corpus):
            session.commit()
            # adding to skipped_docs for later processing
            skipped_docs.append(document.id)
        # documents for this resource
        session.add(corpus)
        session.commit()
    # update info about the resource
@@ -161,20 +154,27 @@ def parse(corpus):
#print( "resource n°",i, ":", d, "docs inside this file")
# add a corpus-level info about languages adding a __skipped__ info
print(len(skipped_docs), "docs skipped")
#skipped_docs
corpus.skipped_docs = list(set(skipped_docs))
print(len(corpus.skipped_docs), "docs skipped")
skipped_langs = dict(Counter(skipped_languages))
if len(corpus.skipped_docs) > 0:
print ("INFO in which:")
print (sum(skipped_langs.values()), "docs with unsupported lang")
print(corpus.children("DOCUMENT").count(), "docs parsed")
#main language of the corpus
#language of corpus
print(languages.items())
corpus.language_id = sorted(languages.items(), key = lambda x: x[1], reverse=True)[0][0]
print(corpus.language_id)
languages['__skipped__'] = dict(Counter(skipped_languages))
corpus.languages = languages
corpus.skipped_docs = list(set(skipped_docs))
print("Default MAIN language of CORPUS", corpus.language_id)
corpus.languages = dict(languages)
corpus.languages["__skipped__"] = list(skipped_langs.keys())
print("Languages of CORPUS", corpus.languages)
corpus.save_hyperdata()
session.commit()
if len(corpus.skipped_docs) > 0:
print (sum(languages["__skipped__"].values()), "docs with unsupported lang")
#assign main lang of the corpus to unsupported languages docs
# for d_id in corpus.skipped_docs:
# document = session.query(Node).filter(Node.id == d_id, Node.typename == "DOCUMENT").first()
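To summarize the bookkeeping in this hunk: the corpus main language is the most frequent language among parsed documents, and unsupported languages are recorded under a __skipped__ key. A small self-contained sketch; the counts are made up, and corpus_languages stands in for corpus.languages:

    from collections import Counter

    languages = {'en': 40, 'fr': 12}         # iso2 code -> parsed document count
    skipped_languages = ['zz', 'xx', 'zz']   # one unsupported code per skipped doc

    # main language of the corpus = the most frequent one
    language_id = sorted(languages.items(), key=lambda x: x[1], reverse=True)[0][0]

    skipped_langs = dict(Counter(skipped_languages))       # {'zz': 2, 'xx': 1}
    corpus_languages = dict(languages)
    corpus_languages["__skipped__"] = list(skipped_langs.keys())

    print("Default MAIN language of CORPUS", language_id)  # -> en
    print("Languages of CORPUS", corpus_languages)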