Commit 89ed8d81 authored by delanoe

[FIX] merge

parents 412ba477 bfcabfad
@@ -263,7 +263,7 @@ RESOURCETYPES = [
     },
     {   "type": 11,
-        "name": 'HAL [API]',
+        "name": 'HAL (english) [API]',
         "parser": "HalParser",
        "format": 'JSON',
        'file_formats':["zip","json"],
...
@@ -29,17 +29,18 @@ class HalCrawler(Crawler):
        '''formating the query'''
        #search_field="title_t"
-        search_field="abstract_t"
+        #search_field="abstract_t"
        #return (search_field + ":" + "(" + query + ")")
-        return query # (search_field + ":" + "(" + query + ")")
+        return "(" + query + ")"

    def _get(self, query, fromPage=1, count=10, lang=None):
        # Parameters
-        fl = """ title_s
-               , abstract_s
+        fl = """ en_title_s
+               , en_title_s
+               , en_abstract_s
               , submittedDate_s
               , journalDate_s
               , authFullName_s
...
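Note on the hunk above: `fl` is the list of Solr fields the HAL crawler asks the HAL search API to return, and the query is now simply parenthesized instead of being scoped to one search field. A minimal standalone sketch of that kind of request, assuming the public endpoint at api.archives-ouvertes.fr and purely illustrative parameter values (this is not the crawler's actual code):

    import requests

    HAL_API = "https://api.archives-ouvertes.fr/search/"   # assumed public endpoint

    params = {
        "q"    : "(deep learning)",   # query string, parenthesized as in _format_query()
        "fl"   : "en_title_s,en_abstract_s,submittedDate_s,journalDate_s,authFullName_s",
        "wt"   : "json",              # response format
        "rows" : 10,                  # page size
        "start": 0,                   # offset of the first result
    }

    docs = requests.get(HAL_API, params=params).json()["response"]["docs"]
    for doc in docs:
        print(doc.get("en_title_s"))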
@@ -2,6 +2,7 @@
 import subprocess
 import re
 from .sparql import Service
+from gargantext.settings import BOOL_TOOLS_PATH
 #from sparql import Service

 def bool2sparql(rawQuery, count=False, offset=None, limit=None):
@@ -12,7 +13,7 @@ def bool2sparql(rawQuery, count=False, offset=None, limit=None):
     See: https://github.com/delanoe/bool2sparql
     """
     query = re.sub("\"", "\'", rawQuery)
-    bashCommand = ["/srv/gargantext/gargantext/util/crawlers/sparql/bool2sparql-exe","-q",query]
+    bashCommand = [BOOL_TOOLS_PATH + "/bool2sparql-exe","-q",query]
     if count is True :
         bashCommand.append("-c")
...
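Note on the hunk above: the path to the bool2sparql binary is no longer hard-coded but read from a BOOL_TOOLS_PATH setting. A hedged sketch of how the pieces fit together, with an illustrative settings value (not the project's actual configuration):

    # settings.py (illustrative value)
    BOOL_TOOLS_PATH = "/srv/gargantext/tools"   # directory containing bool2sparql-exe

    # caller side: sketch of the subprocess invocation built above
    import subprocess
    from gargantext.settings import BOOL_TOOLS_PATH

    def run_bool2sparql(query, count=False):
        command = [BOOL_TOOLS_PATH + "/bool2sparql-exe", "-q", query]
        if count:
            command.append("-c")           # ask for a count instead of the full query
        return subprocess.check_output(command).decode("utf-8")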
@@ -5,15 +5,9 @@ from gargantext.util.json import json_dumps
 ########################################################################
 # get engine, session, etc.
 ########################################################################
-import sqlalchemy as sa
 from sqlalchemy.orm import sessionmaker, scoped_session
-from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy import delete
-
-# To make Full Text search possible, uncomment lines below
-# (and install it with pip before)
-#from sqlalchemy_searchable import make_searchable

 def get_engine():
     from sqlalchemy import create_engine
     return create_engine( settings.DATABASES['default']['URL']
@@ -24,16 +18,8 @@ def get_engine():

 engine = get_engine()

-# To make Full Text search possible, uncomment lines below
-# https://sqlalchemy-searchable.readthedocs.io/
-#sa.orm.configure_mappers()
-
-Base = declarative_base()
-#Base.metadata.create_all(engine)
-#make_searchable()

 session = scoped_session(sessionmaker(bind=engine))

 ########################################################################
 # useful for queries
 ########################################################################
...
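Note: after this cleanup the module only exposes the engine and a thread-local session. A minimal usage sketch, assuming a Node model as in the rest of the codebase (the query itself is illustrative):

    from gargantext.util.db import session
    from gargantext.models import Node   # illustrative import

    corpus = session.query(Node).filter(Node.typename == 'CORPUS').first()
    # ... work with corpus ...
    session.remove()   # scoped_session cleanup, e.g. at the end of a request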
@@ -7,7 +7,7 @@ from gargantext.util.db import session, aliased
 from gargantext.models import Ngram, NodeNgramNgram
 from igraph import Graph # for group_union

-def query_groups(groupings_id, details=False):
+def query_groups(groupings_id, details=False, sort=False):
     """
     Listing of couples (mainform, subform)
         aka (ngram1_id, ngram2_id)
@@ -15,24 +15,27 @@ def query_groups(groupings_id, details=False):
     Parameter:
          - details: if False, just send the array of couples
                     if True, send quadruplets with (ngram1_id, term1, ngram2_id, term2)
+         - sort: order results by terms of ngram1 then ngram2
     """
+    if details or sort:
+        Ngram1, Ngram2 = Ngram, aliased(Ngram)
+
     if not details:
         # simple contents
-        query = session.query(NodeNgramNgram.ngram1_id, NodeNgramNgram.ngram2_id)
+        columns = (NodeNgramNgram.ngram1_id, NodeNgramNgram.ngram2_id)
     else:
         # detailed contents (id + terms)
-        Ngram1 = aliased(Ngram)
-        Ngram2 = aliased(Ngram)
-        query = (session
-                    .query(
-                        NodeNgramNgram.ngram1_id,
-                        Ngram1.terms,
-                        NodeNgramNgram.ngram2_id,
-                        Ngram2.terms,
-                    )
-                    .join(Ngram1, NodeNgramNgram.ngram1_id == Ngram1.id)
-                    .join(Ngram2, NodeNgramNgram.ngram2_id == Ngram2.id)
-                )
+        columns = (Ngram1.id, Ngram1.terms,
+                   Ngram2.id, Ngram2.terms)
+
+    query = session.query(*columns)
+
+    if details or sort:
+        query = (query.join(Ngram1, NodeNgramNgram.ngram1_id == Ngram1.id)
+                      .join(Ngram2, NodeNgramNgram.ngram2_id == Ngram2.id))
+
+    if sort:
+        query = query.order_by(Ngram1.terms, Ngram2.terms)

     # main filter
     # -----------
...
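Note: typical call sites after this change look like the sketch below (the group node id is a placeholder):

    from gargantext.util.group_tools import query_groups

    GROUP_NODE_ID = 42   # hypothetical id of a grouping (synonyms) node

    # id couples only, e.g. [(3544, 2353), (2787, 4032), ...]
    couples = query_groups(GROUP_NODE_ID).all()

    # quadruplets (id1, term1, id2, term2) ordered by mainform then subform terms
    quadruplets = query_groups(GROUP_NODE_ID, details=True, sort=True).all()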
@@ -50,6 +50,9 @@ class _BaseClass:
         else:
             return NotImplemented

+    def __len__(self):
+        return len(self.items)
+
     def __repr__(self):
         items = self.items
         if isinstance(items, defaultdict):
...
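Note: with __len__ delegating to self.items, the list wrappers built on _BaseClass can be passed to len() directly, which the new import summary further below relies on. A tiny hedged sketch, assuming UnweightedList (one of those subclasses) accepts an iterable of ngram ids:

    from gargantext.util.lists import UnweightedList   # illustrative subclass

    maplist = UnweightedList([101, 102, 103])   # hypothetical ngram ids
    print(len(maplist))                         # -> 3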
@@ -8,8 +8,7 @@ Tools to work with ngramlists (MAINLIST, MAPLIST, STOPLIST)
 """
 from gargantext.util.group_tools import query_groups, group_union
-from gargantext.util.db import session, desc, func, \
-                               bulk_insert_ifnotexists
+from gargantext.util.db import session, bulk_insert_ifnotexists
 from gargantext.models import Ngram, NodeNgram, NodeNodeNgram, \
                               NodeNgramNgram, Node
@@ -25,7 +24,6 @@ from gargantext.util.toolchain.ngrams_extraction import normalize_forms
 # merge will also index the new ngrams in the docs of the corpus
 from gargantext.util.toolchain.ngrams_addition import index_new_ngrams

-from sqlalchemy.sql import exists
 from os import path
 from csv import writer, reader, QUOTE_MINIMAL
 from collections import defaultdict
@@ -35,8 +33,8 @@ from celery import shared_task

 def query_list(list_id,
                 pagination_limit=None, pagination_offset=None,
-                details=False, scoring_metric_id=None, groupings_id=None
-                ):
+                details=False, scoring_metric_id=None, groupings_id=None,
+                sort=False):
     """
     Paginated listing of ngram_ids in a NodeNgram lists.
@@ -51,6 +49,7 @@ def query_list(list_id,
                       (for details and sorting)
         - groupings_id: optional id of a list of grouping relations (synonyms)
                          (each synonym will be added to the list if not already in there)
+        - sort: order by Ngram.terms (not possible if details is False)

     FIXME: subforms appended recently and not generalized enough
            => add a common part for all "if groupings_id"
@@ -125,7 +124,10 @@ def query_list(list_id,
         query = query.limit(pagination_limit)

     if pagination_offset:
-        query = query.offset(pagination_offsets)
+        query = query.offset(pagination_offset)
+
+    if details and sort:
+        query = query.order_by(Ngram.terms)

     return query
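Note: a hedged usage sketch of the new sort flag (module path and node id assumed for illustration):

    from gargantext.util.ngramlists_tools import query_list

    MAPLIST_NODE_ID = 123   # hypothetical NodeNgram list id

    # detailed rows, ordered alphabetically by Ngram.terms
    rows = query_list(MAPLIST_NODE_ID, details=True, sort=True).all()

    # with details=False only ngram ids are selected, so sort is ignored
    ids_only = query_list(MAPLIST_NODE_ID).all()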
@@ -186,9 +188,7 @@ def ngrams_to_csv_rows(ngram_objs, ngram_dico={}, group_infos={},
             # 3 columns = |status,      | mainform, | forms
             #             (type_of_list)( term )    ( subterm1|&|subterm2 )
-            csv_rows.append(
-                  [list_type,ng_obj.terms,this_grouped_terms]
-                  )
+            csv_rows.append([list_type, ng_obj.terms, this_grouped_terms])

     return csv_rows
@@ -231,9 +231,10 @@ def export_ngramlists(node,fname=None,delimiter=DEFAULT_CSV_DELIM,titles=True):
     #    listes de ngram_ids correspondantes
     #  ------------------------------------
     #  contenu: liste des objets ngrammes [(2562,"monterme",1),...]
-    stop_ngrams = query_list(stoplist_node.id, details=True, groupings_id=group_node.id).all()
-    main_ngrams = query_list(mainlist_node.id, details=True, groupings_id=group_node.id).all()
-    map_ngrams  = query_list(maplist_node.id, details=True, groupings_id=group_node.id).all()
+    stop_ngrams, main_ngrams, map_ngrams = (
+        query_list(n.id, details=True, groupings_id=group_node.id, sort=True).all()
+        for n in (stoplist_node, mainlist_node, maplist_node)
+    )

     # pour debug ---------->8 --------------------
     #~ stop_ngrams = stop_ngrams[0:10]
@@ -250,7 +251,7 @@ def export_ngramlists(node,fname=None,delimiter=DEFAULT_CSV_DELIM,titles=True):
     # for the groups we got couples of ids in the DB
     # -------------------
     # ex: [(3544, 2353), (2787, 4032), ...]
-    group_ngram_id_couples = query_groups(group_node.id).all()
+    group_ngram_id_couples = query_groups(group_node.id, sort=True)

     # we expend this to double structure for groups lookup
     # 1) g['links'] = k couples (x,y_i) as a set [x => {y1,y2}]
@@ -397,6 +398,9 @@ def import_ngramlists(the_file, delimiter=DEFAULT_CSV_DELIM,
     NB: To merge the imported lists into a corpus node's lists,
         chain this function with merge_ngramlists()
     '''
+
+    list_types = ['stop','main','map']
+
     # ---------------
     #  ngram storage
     # ---------------
@@ -461,7 +465,6 @@ def import_ngramlists(the_file, delimiter=DEFAULT_CSV_DELIM,
         # headers
         if i == 0:
-            n_cols = len(csv_row)
             for j, colname in enumerate(csv_row):
                 if colname in ['label', 'status', 'forms']:
                     columns[colname] = j
@@ -508,31 +511,30 @@ def import_ngramlists(the_file, delimiter=DEFAULT_CSV_DELIM,
             continue

         # --- check correct list type
-        if not this_list_type in ['stop','main','map']:
+        if not this_list_type in list_types:
             print("IMPORT WARN: (skip line) wrong list type at CSV %s:l.%i" % (fname, i))
             continue

         # subforms can be duplicated (in forms and another label)
         # but we must take care of unwanted other duplicates too
-        if this_row_label in imported_unique_ngramstrs:
-            print("TODO IMPORT DUPL: (skip line) term appears more than once at CSV %s:l.%i"
-                    % (fname, i))
+        if imported_unique_ngramstrs.get(this_row_label) == 1:
+            print("TODO IMPORT DUPL: (skip line) term %r appears more than once at CSV %s:l.%i"
+                    % (this_row_label, fname, i))

         # ================= Store the data ====================
         # the ngram census
-        imported_unique_ngramstrs[this_row_label] = True
+        imported_unique_ngramstrs[this_row_label] = 1

         # and the "list to ngram" relation
         imported_nodes_ngrams[this_list_type].append(this_row_label)

         # ====== Store synonyms from the import (if any) ======
         if len(this_row_forms) != 0:
-            other_terms = []
             for raw_term_str in this_row_forms.split(group_delimiter):

                 # each subform is also like an ngram declaration
                 term_str = normalize_forms(normalize_chars(raw_term_str))
-                imported_unique_ngramstrs[term_str] = True
+                imported_unique_ngramstrs[term_str] = 2
                 imported_nodes_ngrams[this_list_type].append(term_str)

                 # the optional repeated mainform doesn't interest us
@@ -610,7 +612,10 @@ def import_ngramlists(the_file, delimiter=DEFAULT_CSV_DELIM,
            % (n_total_ng, n_added_ng, n_total_ng-n_added_ng) )
     print("IMPORT: read %i grouping relations" % n_group_relations)

-    # print("IMPORT RESULT", result)
+    list_counts = [(typ, len(result.get(typ))) for typ in list_types]
+    list_counts.append(('total', sum(x[1] for x in list_counts)))
+    print("IMPORT: " + '; '.join('%s %s' % stats for stats in list_counts))
+

     return result

 def merge_ngramlists(new_lists={}, onto_corpus=None, del_originals=[]):
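Note: the commented-out debug print is replaced by a per-list summary; this also relies on the __len__ added to _BaseClass above, since result holds list objects rather than plain lists. For illustration, with hypothetical counts (plain lists stand in for the real list objects):

    result = {'stop': [1]*10, 'main': [1]*150, 'map': [1]*30}   # hypothetical sizes
    list_types = ['stop', 'main', 'map']

    list_counts = [(typ, len(result.get(typ))) for typ in list_types]
    list_counts.append(('total', sum(x[1] for x in list_counts)))
    print("IMPORT: " + '; '.join('%s %s' % stats for stats in list_counts))
    # -> IMPORT: stop 10; main 150; map 30; total 190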
@@ -718,9 +723,11 @@ def merge_ngramlists(new_lists={}, onto_corpus=None, del_originals=[]):

     # ======== Merging all involved ngrams =========

-    # all memberships with resolved conflicts of interfering memberships
+    # all ngram memberships with resolved conflicts of interfering memberships
+    # (associates ngram ids with list types -- see linfos definition above)
     resolved_memberships = {}

+    # iterates over each ngram of each list type for both old and new lists
     for list_set in [old_lists, new_lists]:
         for lid, info in enumerate(linfos):
             list_type = info['key']
# ======== Merging old and new groups ========= # ======== Merging old and new groups =========
# get the arcs already in the target DB (directed couples) # get the arcs already in the target DB (directed couples)
previous_links = session.query( if 'groupings' in del_originals:
NodeNgramNgram.ngram1_id, previous_links = []
NodeNgramNgram.ngram2_id else:
).filter( previous_links = session.query(
NodeNgramNgram.node_id == old_group_id NodeNgramNgram.ngram1_id,
).all() NodeNgramNgram.ngram2_id
).filter(
NodeNgramNgram.node_id == old_group_id
).all()
n_links_previous = len(previous_links) n_links_previous = len(previous_links)
@@ -822,7 +832,7 @@ def merge_ngramlists(new_lists={}, onto_corpus=None, del_originals=[]):
             list_type = linfos[lid]['key']
             merged_results[list_type].items.add(ng_id)

-    # print("IMPORT: added %i elements in the lists indices" % added_nd_ng)
+    print("IMPORT: added %i elements in the lists indices" % added_nd_ng)

     # ======== Overwrite old data with new =========
     for lid, info in enumerate(linfos):
@@ -845,13 +855,17 @@ def import_and_merge_ngramlists(file_contents, onto_corpus_id, overwrite=False):
     """
     A single function to run import_ngramlists and merge_ngramlists together
     """
-    print("import list")
+    print("IMPORT CSV termlists file with %s lines in corpus %s (%s)" % (
+            len(file_contents),
+            onto_corpus_id, 'overwrite' if overwrite else 'merge'))
+
     new_lists = import_ngramlists(file_contents)

-    corpus_node = session.query(Node).filter(Node.id == onto_corpus_id).first()
+    corpus_node = session.query(Node).get(onto_corpus_id)

     # merge the new_lists onto those of the target corpus
-    del_originals = ['stop', 'main', 'map'] if overwrite else []
+    del_originals = ['stop', 'main', 'map', 'groupings'] if overwrite else []
+
     log_msg = merge_ngramlists(new_lists, onto_corpus=corpus_node, del_originals=del_originals)
     return log_msg
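Note: an end-to-end usage sketch of the updated helper (file name and corpus id are placeholders, and the return value is simply printed):

    from gargantext.util.ngramlists_tools import import_and_merge_ngramlists

    CORPUS_ID = 43   # hypothetical target corpus node id

    with open("terms_export.csv", encoding="utf-8") as f:
        file_contents = f.read().splitlines()

    # overwrite=True now also discards the previous grouping relations ('groupings')
    log = import_and_merge_ngramlists(file_contents, CORPUS_ID, overwrite=True)
    print(log)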
@@ -4,128 +4,67 @@ import sys
 import csv

 csv.field_size_limit(sys.maxsize)

 import numpy as np
+import os

 class CSVParser(Parser):
+    DELIMITERS = ", \t;|:"

-    def CSVsample( self, small_contents , delim) :
-        reader = csv.reader(small_contents, delimiter=delim)
-
-        Freqs = []
-        for row in reader:
-            Freqs.append(len(row))
-
-        return Freqs
+    def detect_delimiter(self, lines, sample_size=10):
+        sample = lines[:sample_size]
+
+        # Compute frequency of each delimiter on each input line
+        delimiters_freqs = {
+            d: [line.count(d) for line in sample]
+            for d in self.DELIMITERS
+        }
+
+        # Select delimiters with a standard deviation of zero, ie. delimiters
+        # for which we have the same number of fields on each line
+        selected_delimiters = [
+            (d, np.sum(freqs))
+            for d, freqs in delimiters_freqs.items()
+            if any(freqs) and np.std(freqs) == 0
+        ]
+
+        if selected_delimiters:
+            # Choose the delimiter with highest frequency amongst selected ones
+            sorted_delimiters = sorted(selected_delimiters, key=lambda x: x[1])
+            return sorted_delimiters[-1][0]

     def parse(self, filebuf):
         print("CSV: parsing (assuming UTF-8 and LF line endings)")
         contents = filebuf.read().decode("UTF-8").split("\n")
-        sample_size = 10
-        sample_contents = contents[0:sample_size]
-
-        hyperdata_list = []
-
-        # # = = = = [ Getting delimiters frequency ] = = = = #
-        PossibleDelimiters = [ ',',' ','\t', ';', '|', ':' ]
-        AllDelimiters = {}
-        for delim in PossibleDelimiters:
-            AllDelimiters[delim] = self.CSVsample( sample_contents , delim )
-        # # = = = = [ / Getting delimiters frequency ] = = = = #
-        # # OUTPUT example:
-        # #   AllDelimiters = {
-        # #     '\t': [1, 1, 1, 1, 1],
-        # #     ' ': [1, 13, 261, 348, 330],
-        # #     ',': [15, 15, 15, 15, 15],
-        # #     ';': [1, 1, 1, 1, 1],
-        # #     '|': [1, 1, 1, 1, 1]
-        # #   }
-
-        # # = = = = [ Stand.Dev=0 & Sum of delimiters ] = = = = #
-        Delimiters = []
-        for d in AllDelimiters:
-            freqs = AllDelimiters[d]
-            suma = np.sum( freqs )
-            if suma >0:
-                std = np.std( freqs )
-                # print [ d , suma , len(freqs) , std]
-                if std == 0:
-                    Delimiters.append ( [ d , suma , len(freqs) , std] )
-        # # = = = = [ / Stand.Dev=0 & Sum of delimiters ] = = = = #
-        # # OUTPUT example:
-        # #   Delimiters = [
-        # #     ['\t', 5, 5, 0.0],
-        # #     [',', 75, 5, 0.0],
-        # #     ['|', 5, 5, 0.0]
-        # #   ]
-
-        # # = = = = [ Delimiter selection ] = = = = #
-        Sorted_Delims = sorted(Delimiters, key=lambda x: x[1], reverse=True)
-        HighestDelim = Sorted_Delims[0][0]
-        # HighestDelim = ","
-        print("CSV selected delimiter:",[HighestDelim])
-        # # = = = = [ / Delimiter selection ] = = = = #
-
-        # # = = = = [ First data coordinate ] = = = = #
-        Coords = {
-            "row": -1,
-            "column": -1
-        }
-        reader = csv.reader(contents, delimiter=HighestDelim)
-        for rownum, tokens in enumerate(reader):
-            if rownum % 250 == 0:
-                print("CSV row: ", rownum)
-            joined_tokens = "".join (tokens)
-            if Coords["row"]<0 and len( joined_tokens )>0 :
-                Coords["row"] = rownum
-                for columnum in range(len(tokens)):
-                    t = tokens[columnum]
-                    if len(t)>0:
-                        Coords["column"] = columnum
-                        break
-        # # = = = = [ / First data coordinate ] = = = = #
-
-        # # = = = = [ Setting Headers ] = = = = #
-        Headers_Int2Str = {}
-        reader = csv.reader(contents, delimiter=HighestDelim)
-        for rownum, tokens in enumerate(reader):
-            if rownum>=Coords["row"]:
-                for columnum in range( Coords["column"],len(tokens) ):
-                    t = tokens[columnum]
-                    Headers_Int2Str[columnum] = t
-                break
-        # print("Headers_Int2Str")
-        # print(Headers_Int2Str)
-        # # = = = = [ / Setting Headers ] = = = = #
-        # # OUTPUT example:
-        # #   Headers_Int2Str = {
-        # #     0: 'publication_date',
-        # #     1: 'publication_month',
-        # #     2: 'publication_second',
-        # #     3: 'abstract'
-        # #   }
-
-        # # = = = = [ Reading the whole CSV and saving ] = = = = #
-        hyperdata_list = []
-        reader = csv.reader(contents, delimiter=HighestDelim)
-        for rownum, tokens in enumerate(reader):
-            if rownum>Coords["row"]:
-                RecordDict = {}
-                for columnum in range( Coords["column"],len(tokens) ):
-                    data = tokens[columnum]
-                    RecordDict[ Headers_Int2Str[columnum] ] = data
-                if len(RecordDict.keys())>0:
-                    hyperdata_list.append( RecordDict )
-        # # = = = = [ / Reading the whole CSV and saving ] = = = = #
-        return hyperdata_list
+
+        # Filter out empty lines
+        contents = [line for line in contents if line.strip()]
+
+        # Delimiter auto-detection
+        delimiter = self.detect_delimiter(contents, sample_size=10)
+
+        if delimiter is None:
+            raise ValueError("CSV: couldn't detect delimiter, bug or malformed data")
+
+        print("CSV: selected delimiter: %r" % delimiter)
+
+        # Parse CSV
+        reader = csv.reader(contents, delimiter=delimiter)
+
+        # Get first not empty row and its fields (ie. header row), or (0, [])
+        first_row, headers = \
+            next(((i, fields) for i, fields in enumerate(reader) if any(fields)),
+                 (0, []))
+
+        # Get first not empty column of the first row, or 0
+        first_col = next((i for i, field in enumerate(headers) if field), 0)
+
+        # Strip out potential empty fields in headers
+        headers = headers[first_col:]
+
+        # Return a generator of dictionaries with column labels as keys,
+        # filtering out empty rows
+        for i, fields in enumerate(reader):
+            if i % 500 == 0:
+                print("CSV: parsing row #%s..." % (i+1))
+
+            if any(fields):
+                yield dict(zip(headers, fields[first_col:]))
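Note: the rewritten parser keeps the original heuristic — a delimiter is plausible when every sampled line yields the same number of fields (zero standard deviation of the per-line counts), and the most frequent such delimiter wins — but expresses it in a few comprehensions and turns parse() into a generator. A self-contained sketch of the same heuristic outside the Parser class:

    import numpy as np

    DELIMITERS = ", \t;|:"

    def detect_delimiter(lines, sample_size=10):
        """Return the most frequent delimiter with a constant per-line count, or None."""
        sample = lines[:sample_size]
        freqs = {d: [line.count(d) for line in sample] for d in DELIMITERS}
        candidates = [(d, np.sum(counts)) for d, counts in freqs.items()
                      if any(counts) and np.std(counts) == 0]
        if candidates:
            return max(candidates, key=lambda x: x[1])[0]
        return None

    print(detect_delimiter(["a;b;c", "1;2;3", "4;5;6"]))   # -> ';'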
@@ -16,8 +16,8 @@ class HalParser(Parser):
         hyperdata_list = []

         hyperdata_path = { "id"       : "isbn_s"
-                         , "title"    : "title_s"
-                         , "abstract" : "abstract_s"
+                         , "title"    : "en_title_s"
+                         , "abstract" : "en_abstract_s"
                          , "source"   : "journalTitle_s"
                          , "url"      : "uri_s"
                          , "authors"  : "authFullName_s"
...
@@ -81,44 +81,45 @@ def extract_ngrams(corpus, keys=DEFAULT_INDEX_FIELDS, do_subngrams = DEFAULT_IND
                 corpus.hyperdata["skipped_docs"].append(document.id)
                 corpus.save_hyperdata()
                 continue
-            else:
-                # ready !
-                tagger = tagger_bots[language_iso2]
-                # to do verify if document has no KEYS to index
-                # eg: use set intersect (+ loop becomes direct! with no continue)
-                for key in keys:
-                    try:
-                        value = document.hyperdata[str(key)]
-                        if not isinstance(value, str):
-                            #print("DBG wrong content in doc for key", key)
-                            continue
-                        # get ngrams
-                        for ngram in tagger.extract(value):
-                            tokens = tuple(normalize_forms(token[0]) for token in ngram)
-                            if do_subngrams:
-                                # ex tokens = ["very", "cool", "exemple"]
-                                #    subterms = [['very', 'cool'],...]
-                                subterms = subsequences(tokens)
-                            else:
-                                subterms = [tokens]
-                            for seqterm in subterms:
-                                ngram = ' '.join(seqterm)
-                                nbwords = len(seqterm)
-                                nbchars = len(ngram)
-                                if nbchars > 1:
-                                    if nbchars > 255:
-                                        # max ngram length (DB constraint)
-                                        ngram = ngram[:255]
-                                    # doc <=> ngram index
-                                    nodes_ngrams_count[(document.id, ngram)] += 1
-                                    # add fields : terms n
-                                    ngrams_data.add((ngram, nbwords, ))
-                    except:
-                        #value not in doc
-                        continue
+
+            # ready !
+            tagger = tagger_bots[language_iso2]
+            # to do verify if document has no KEYS to index
+            # eg: use set intersect (+ loop becomes direct! with no continue)
+            for key in keys:
+                try:
+                    value = document.hyperdata[str(key)]
+                    if not isinstance(value, str):
+                        #print("DBG wrong content in doc for key", key)
+                        continue
+                    # get ngrams
+                    for ngram in tagger.extract(value):
+                        normal_forms = (normalize_forms(t[0]) for t in ngram)
+                        tokens = tuple(nf for nf in normal_forms if nf)
+                        if do_subngrams:
+                            # ex tokens = ["very", "cool", "exemple"]
+                            #    subterms = [['very', 'cool'],...]
+                            subterms = subsequences(tokens)
+                        else:
+                            subterms = [tokens]
+                        for seqterm in subterms:
+                            ngram = ' '.join(seqterm)
+                            nbwords = len(seqterm)
+                            nbchars = len(ngram)
+                            if nbchars > 1:
+                                if nbchars > 255:
+                                    # max ngram length (DB constraint)
+                                    ngram = ngram[:255]
+                                # doc <=> ngram index
+                                nodes_ngrams_count[(document.id, ngram)] += 1
+                                # add fields : terms n
+                                ngrams_data.add((ngram, nbwords, ))
+                except:
+                    #value not in doc
+                    continue

         # integrate ngrams and nodes-ngrams
         if len(nodes_ngrams_count) >= BATCH_NGRAMSEXTRACTION_SIZE:
...
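Note: besides the de-indentation (the else: around the whole block was redundant after continue), the behavioural change is that empty normalized tokens are now dropped before building sub-ngrams. A hedged, self-contained illustration with a stub standing in for gargantext's normalize_forms():

    def normalize_forms(token):
        return token.strip("£$*")     # stub: pretend normalization strips junk characters

    ngram = [("very", "ADV"), ("£", "SYM"), ("cool", "ADJ")]   # (token, tag) pairs from a tagger

    # old behaviour: empty normalized tokens were kept
    tokens_old = tuple(normalize_forms(token[0]) for token in ngram)
    # -> ('very', '', 'cool')

    # new behaviour: empty normalized tokens are filtered out
    normal_forms = (normalize_forms(t[0]) for t in ngram)
    tokens_new = tuple(nf for nf in normal_forms if nf)
    # -> ('very', 'cool')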
@@ -155,7 +155,12 @@ class CSVLists(APIView):
         try:
             # merge the source_lists onto those of the target corpus
             delete = todo_lists if bool(params.get('overwrite')) else []
+
+            if len(delete) == len(list_types):
+                delete.append('groupings')
+
             log_msg = merge_ngramlists(source_lists, onto_corpus=corpus_node, del_originals=delete)
             return JsonHttpResponse({
                     'log': log_msg,
                 }, 200)
...
 from django.conf.urls import url
+from rest_framework_jwt.views import obtain_jwt_token

 from . import nodes
 from . import projects
 from . import corpora
@@ -10,78 +12,81 @@ from . import ngramlists
 from . import analytics
 from graph.rest import Graph

-urlpatterns = [ url(r'^nodes$' , nodes.NodeListResource.as_view() )
-              , url(r'^nodes/(\d+)$' , nodes.NodeResource.as_view() )
-              , url(r'^nodes/(\d+)/having$' , nodes.NodeListHaving.as_view() )
-              , url(r'^nodes/(\d+)/status$' , nodes.Status.as_view() )
-              #Projects
-              , url(r'^projects$' , projects.ProjectList.as_view() )
-              , url(r'^projects/(\d+)$' , projects.ProjectView.as_view() )
-              #?view=resource
-              #?view=docs
-              #Corpora
-              , url(r'^projects/(\d+)/corpora/(\d+)$' , corpora.CorpusView.as_view() )
-              #?view=source
-              #?view=title
-              #?view=analytics
-              #Sources
-              #, url(r'^projects/(\d+)/corpora/(\d+)/sources$' , corpora.CorpusSources.as_view() )
-              #, url(r'^projects/(\d+)/corpora/(\d+)/sources/(\d+)$' , corpora.CorpusSourceView.as_view() )
-              #Facets
-              , url(r'^projects/(\d+)/corpora/(\d+)/facets$' , nodes.CorpusFacet.as_view() )
-              #Favorites
-              , url(r'^projects/(\d+)/corpora/(\d+)/favorites$', nodes.CorpusFavorites.as_view() )
-              #Metrics
-              , url(r'^projects/(\d+)/corpora/(\d+)/metrics$', metrics.CorpusMetrics.as_view() )
-              #GraphExplorer
-              , url(r'^projects/(\d+)/corpora/(\d+)/explorer$' , Graph.as_view())
+urlpatterns = [ url(r'^nodes$' , nodes.NodeListResource.as_view())
+              , url(r'^nodes/(\d+)$' , nodes.NodeResource.as_view())
+              , url(r'^nodes/(\d+)/having$' , nodes.NodeListHaving.as_view())
+              , url(r'^nodes/(\d+)/status$' , nodes.Status.as_view())
+
+              # Projects
+              , url(r'^projects$' , projects.ProjectList.as_view())
+              , url(r'^projects/(\d+)$' , projects.ProjectView.as_view())
+
+              # Corpora
+              , url(r'^projects/(\d+)/corpora/(\d+)$', corpora.CorpusView.as_view())
+
+              # Sources
+              #, url(r'^projects/(\d+)/corpora/(\d+)/sources$', corpora.CorpusSources.as_view())
+              #, url(r'^projects/(\d+)/corpora/(\d+)/sources/(\d+)$ , corpora.CorpusSourceView.as_view())
+
+              # Facets
+              , url(r'^projects/(\d+)/corpora/(\d+)/facets$', nodes.CorpusFacet.as_view())
+
+              # Favorites
+              , url(r'^projects/(\d+)/corpora/(\d+)/favorites$', nodes.CorpusFavorites.as_view())
+
+              # Metrics
+              , url(r'^projects/(\d+)/corpora/(\d+)/metrics$', metrics.CorpusMetrics.as_view())
+
+              # GraphExplorer
+              , url(r'^projects/(\d+)/corpora/(\d+)/explorer$', Graph.as_view())
               # data for graph explorer (json)
               # GET /api/projects/43198/corpora/111107/explorer?
               # Corresponding view is : /projects/43198/corpora/111107/explorer?
               # Parameters (example):
               # explorer?field1=ngrams&field2=ngrams&distance=conditional&bridgeness=5&start=1996-6-1&end=2002-10-5

               # Ngrams
-              , url(r'^ngrams/?$' , ngrams.ApiNgrams.as_view() )
+              , url(r'^ngrams/?$' , ngrams.ApiNgrams.as_view())

               # Analytics
               , url(r'^nodes/(\d+)/histories$', analytics.NodeNgramsQueries.as_view())
-              , url(r'hyperdata$' , analytics.ApiHyperdata.as_view() )
+              , url(r'hyperdata$' , analytics.ApiHyperdata.as_view())

               # get a list of ngram_ids or ngram_infos by list_id
               # url(r'^ngramlists/(\d+)$', ngramlists.List.as_view()),
-              , url(r'^nodes/(\d+)/facets$' , nodes.CorpusFacet.as_view() )
-              , url(r'^nodes/(\d+)/favorites$', nodes.CorpusFavorites.as_view() )
+              , url(r'^nodes/(\d+)/facets$' , nodes.CorpusFacet.as_view())
+              , url(r'^nodes/(\d+)/favorites$', nodes.CorpusFavorites.as_view())
               # in these two routes the node is supposed to be a *corpus* node

-              , url(r'^metrics/(\d+)$', metrics.CorpusMetrics.as_view() )
+              , url(r'^metrics/(\d+)$' , metrics.CorpusMetrics.as_view())
               # update all metrics for a corpus
               # ex: PUT metrics/123
               #     \
               #      corpus id

-              , url(r'^ngramlists/export$', ngramlists.CSVLists.as_view() )
+              , url(r'^ngramlists/export$', ngramlists.CSVLists.as_view())
               # get a CSV export of the ngramlists of a corpus
               # ex: GET ngramlists/export?corpus=43
               # TODO : unify to a /api/ngrams?formatted=csv
               #        (similar to /api/nodes?formatted=csv)

-              , url(r'^ngramlists/import$', ngramlists.CSVLists.as_view() )
+              , url(r'^ngramlists/import$', ngramlists.CSVLists.as_view())
               # same handling class as export (CSVLists)
               # but this route used only for POST + file
               #                   or PATCH + other corpus id

-              , url(r'^ngramlists/change$', ngramlists.ListChange.as_view() )
+              , url(r'^ngramlists/change$', ngramlists.ListChange.as_view())
               # add or remove ngram from a list
               # ex: add <=> PUT ngramlists/change?list=42&ngrams=1,2
               #     rm  <=> DEL ngramlists/change?list=42&ngrams=1,2

-              , url(r'^ngramlists/groups$', ngramlists.GroupChange.as_view() )
+              , url(r'^ngramlists/groups$', ngramlists.GroupChange.as_view())
               # modify grouping couples of a group node
               # ex: PUT/DEL ngramlists/groups?node=43
               #     & group data also in url: 767[]=209,640 & 779[]=436,265,385

-              , url(r'^ngramlists/family$' , ngramlists.ListFamily.as_view() )
+              , url(r'^ngramlists/family$', ngramlists.ListFamily.as_view())
               # entire combination of lists from a corpus, dedicated to termtable
               # (or any combination of lists that go together :
               #   - a mainlist
@@ -89,8 +94,11 @@ urlpatterns = [ url(r'^nodes$' , nodes.NodeListResource.as_view()
               #   - an optional maplist
               #   - an optional grouplist

-              , url(r'^ngramlists/maplist$' , ngramlists.MapListGlance.as_view() )
+              , url(r'^ngramlists/maplist$', ngramlists.MapListGlance.as_view())
               # fast access to maplist, similarly formatted for termtable

-              , url(r'^user/parameters/$', users.UserParameters.as_view())
+              , url(r'^user/parameters/$', users.UserParameters.as_view())
+
+              , url('^auth/token$', obtain_jwt_token)
              ]
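Note: the new ^auth/token route exposes djangorestframework-jwt's obtain_jwt_token view (the package is added to the requirements below). A hedged client-side sketch — host, credentials and the /api prefix are assumptions:

    import requests

    API = "http://localhost:8000/api"   # assumed mount point of these urlpatterns

    # 1) POST credentials to obtain a JWT
    resp = requests.post(API + "/auth/token",
                         data={"username": "alice", "password": "secret"})
    token = resp.json()["token"]

    # 2) use it on subsequent API calls (default header prefix for this package is "JWT")
    headers = {"Authorization": "JWT " + token}
    nodes = requests.get(API + "/nodes", headers=headers).json()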
@@ -11,6 +11,7 @@ django-celery==3.2.1
 django-pgfields==1.4.4
 django-pgjsonb==0.0.23
 djangorestframework==3.5.3
+djangorestframework-jwt==1.9.0
 html5lib==0.9999999
 python-igraph>=0.7.1
 jdatetime==1.7.2
...
@@ -440,11 +440,12 @@
         // in the form "Add a corpus"
         var type = $("#id_type").val()
+        var file = $("#id_file").val()

         // 5 booleans
         var nameField = $("#id_name").val() != ""
         var typeField = (type != "") && (type != "0")
-        var fileField = $("#id_file").val() != ""
+        var fileField = file != ""
         var wantfileField = $("#file_yes").prop("checked")
         var crawling = ((type==3)||(type==8)||(type==9)) && ! wantfileField
@@ -457,6 +458,23 @@
         if (! crawling) {
             $("#submit_thing").prop('disabled' , !(nameField && typeField && fileField))
         }
+
+        // Automatically select CSV when type is undefined
+        // and we have a .csv file
+        if (!typeField && file && file.match(/.csv$/i)) {
+            // Get CSV type id
+            var csv = $('#id_type > option')
+                .filter(function() {
+                    return $(this).text() === 'CSV'
+                })
+                .attr('value')
+            // Select CSV type
+            $('#id_type').val(csv)
+            // Focus on name field
+            setTimeout(function() {
+                $("#id_name").focus()
+            })
+        }
     }

     function bringDaNoise() {
...