Commit 4d31965b authored by Administrator

[FIX] Merge prod

parents d8098b32 38c315c0
__pycache__/
parsing/Taggers/treetagger/
.ipynb_checkpoints/
*.pyc
data_samples
VENV
from node.models import Language, ResourceType, Resource, \
Node, NodeType, Node_Resource, Project, Corpus, \
Node_Ngram, NodeNgramNgram
Node_Ngram, NodeNgramNgram, NodeNodeNgram
from collections import defaultdict
from django.db import connection, transaction
from math import log
def create_blacklist(user, corpus):
pass
......@@ -133,7 +135,7 @@ def create_cooc(user=None, corpus=None, whitelist=None, size=150, year_start=Non
cursor.execute(query_cooc)
return cooc
def get_cooc(request=None, corpus_id=None, cooc_id=None, type=None, n=150):
def get_cooc(request=None, corpus_id=None, cooc_id=None, type='node_link', n=150):
import pandas as pd
from copy import copy
import numpy as np
......@@ -144,6 +146,7 @@ def get_cooc(request=None, corpus_id=None, cooc_id=None, type=None, n=150):
from analysis.louvain import best_partition
matrix = defaultdict(lambda : defaultdict(float))
ids = dict()
labels = dict()
weight = dict()
......@@ -159,9 +162,13 @@ def get_cooc(request=None, corpus_id=None, cooc_id=None, type=None, n=150):
cooccurrence_node = Node.objects.filter(type=type_cooc, parent=corpus).first()
for cooccurrence in NodeNgramNgram.objects.filter(node=cooccurrence_node):
ids[cooccurrence.ngramx.terms] = cooccurrence.ngramx.id
ids[cooccurrence.ngramy.terms] = cooccurrence.ngramy.id
labels[cooccurrence.ngramx.id] = cooccurrence.ngramx.terms
labels[cooccurrence.ngramy.id] = cooccurrence.ngramy.terms
matrix[cooccurrence.ngramx.id][cooccurrence.ngramy.id] = cooccurrence.score
matrix[cooccurrence.ngramy.id][cooccurrence.ngramx.id] = cooccurrence.score
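The matrix built above is symmetric by construction. A minimal standalone sketch of the same structure, with hypothetical ngram ids and score:

from collections import defaultdict

# nested defaultdict: missing cells read as 0.0
matrix = defaultdict(lambda: defaultdict(float))
matrix[1][2] = matrix[2][1] = 4.0  # ngrams 1 and 2 co-occur with score 4
assert matrix[1][3] == 0.0         # a never-seen pair defaults to zero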
......@@ -193,12 +200,12 @@ def get_cooc(request=None, corpus_id=None, cooc_id=None, type=None, n=150):
for community in set(partition.values()):
#print(community)
G.add_node("cluster " + str(community), hidden=1)
for node in G.nodes():
try:
# print(node, type(labels[node]))
G.node[node]['label'] = node
G.node[node]['name'] = node
G.node[node]['pk'] = ids[str(node)]
G.node[node]['size'] = weight[node]
G.node[node]['group'] = partition[node]
G.add_edge(node, "cluster " + str(partition[node]), weight=3)
......@@ -234,6 +241,42 @@ def get_cooc(request=None, corpus_id=None, cooc_id=None, type=None, n=150):
return data
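For reference, a minimal sketch of the 'node_link' serialization that get_cooc performs, with a stand-in partition instead of analysis.louvain.best_partition and hypothetical node names:

import networkx as nx
from networkx.readwrite import json_graph

G = nx.Graph()
G.add_edge('bee', 'honey', weight=2.0)
G.add_edge('bee', 'hive', weight=1.0)

partition = {'bee': 0, 'honey': 0, 'hive': 1}  # stand-in for best_partition(G)
for n in G.nodes():
    G.nodes[n]['label'] = n  # the diff above uses the older G.node accessor
    G.nodes[n]['group'] = partition[n]

data = json_graph.node_link_data(G)  # dict with 'nodes' and 'links' keys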
def tfidf(corpus, document, ngram):
try:
occurrences_of_ngram = Node_Ngram.objects.get(node=document, ngram=ngram).weight
ngrams_by_document = sum([x.weight for x in Node_Ngram.objects.filter(node=document)])
term_frequency = occurrences_of_ngram / ngrams_by_document
xx = Node.objects.filter(parent=corpus, type=NodeType.objects.get(name="Document")).count()  # documents in the corpus
yy = Node_Ngram.objects.filter(ngram=ngram).count()  # documents containing the ngram
inverse_d_frequency = log(xx/yy)
# result = tf * idf
result = term_frequency * inverse_d_frequency
except Exception as error:
print(error)
result = 0
return result
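As a sanity check on the arithmetic above, a self-contained toy run with hypothetical counts, no database involved:

from math import log

occurrences_of_ngram = 3  # weight of the ngram in one document
ngrams_by_document = 100  # total ngram weight of that document
term_frequency = occurrences_of_ngram / ngrams_by_document  # 0.03

xx = 1000  # documents under the corpus
yy = 10    # Node_Ngram rows matching the ngram
inverse_d_frequency = log(xx / yy)  # log(100) ~ 4.605

print(term_frequency * inverse_d_frequency)  # ~ 0.138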
def do_tfidf(corpus, reset=True):
with transaction.atomic():
if reset:
NodeNodeNgram.objects.filter(nodex=corpus).delete()
if isinstance(corpus, Node) and corpus.type.name == "Corpus":
for document in Node.objects.filter(parent=corpus, type=NodeType.objects.get(name="Document")):
for node_ngram in Node_Ngram.objects.filter(node=document):
try:
nnn = NodeNodeNgram.objects.get(nodex=corpus, nodey=document, ngram=node_ngram.ngram)
except:
score = tfidf(corpus, document, node_ngram.ngram)
nnn = NodeNodeNgram(nodex=corpus, nodey=node_ngram.node, ngram=node_ngram.ngram, score=score)
nnn.save()
else:
print("Only corpus implemented yet, you put instead:", type(corpus))
......
......@@ -27,7 +27,7 @@ CELERY_IMPORTS=("node.models",)
SECRET_KEY = 'bt)3n9v&a02cu7^^=+u_t2tmn8ex5fvx8$x4r*j*pb1yawd+rz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DEBUG = False
TEMPLATE_DEBUG = True
......
......@@ -17,36 +17,46 @@ urlpatterns = patterns('',
url(r'^login/', include(admin.site.urls)),
url(r'^grappelli/', include('grappelli.urls')),
# User views
# User Home view
url(r'^$', views.home),
# Project Management
url(r'^projects/$', views.projects),
url(r'^project/(\d+)/delete/$', views.delete_project),
url(r'^project/(\d+)/$', views.project),
# Corpus management
url(r'^project/(\d+)/corpus/(\d+)/$', views.corpus),
url(r'^project/(\d+)/corpus/(\d+)/delete/$', views.delete_corpus),
url(r'^project/(\d+)/corpus/(\d+)/corpus.csv$', views.corpus_csv),
url(r'^project/(\d+)/corpus/(\d+)/timerange/(\d+)/(\d+)$', views.subcorpus),
# Visualizations
url(r'^corpus/(\d+)/explorer$', views.explorer_graph),
url(r'^corpus/(\d+)/matrix$', views.explorer_matrix),
url(r'^project/(\d+)/corpus/(\d+)/chart$', views.chart),
url(r'^corpus/(\d+)/explorer$', views.graph),
url(r'^corpus/(\d+)/matrix$', views.matrix),
# Getting data
# Data management
url(r'^chart/corpus/(\d+)/data.csv$', views.send_csv),
url(r'^corpus/(\d+)/node_link.json$', views.node_link),
url(r'^corpus/(\d+)/adjacency.json$', views.adjacency),
url(r'^api/tfidf/(\d+)/(\w+)$', views.tfidf),
# Data management
url(r'^api$', gargantext_web.api.Root),
url(r'^api/nodes/(\d+)/children/metadata$', gargantext_web.api.NodesChildrenMetatadata.as_view()),
url(r'^api/nodes/(\d+)/children/queries$', gargantext_web.api.NodesChildrenQueries.as_view()),
url(r'^api/nodes/(\d+)$', gargantext_web.api.Nodes.as_view()),
url(r'^api/nodes$', gargantext_web.api.NodesList.as_view()),
url(r'^api/nodes$', gargantext_web.api.NodesController.get),
url(r'^api/project/(\d+)/corpus/(\d+)/timerange/(\d+)/(\d+)$', views.subcorpusJSON),
url(r'^api/nodes/(\d+)/ngrams$', gargantext_web.api.CorpusController.ngrams),
url(r'^api/nodes/(\d+)/data$', gargantext_web.api.CorpusController.data),
url(r'^graph-it$', views.graph_it),
url(r'^ngrams$', views.ngrams),
url(r'^nodeinfo/(\d+)$', views.nodeinfo),
url(r'^tests/mvc$', views.tests_mvc),
)
......
......@@ -10,10 +10,6 @@ os.environ.setdefault("DJANGO_HSTORE_GLOBAL_REGISTER", "False")
from node.models import *
# Node.objects.get(id=26514).children.all().make_metadata_filterable()
# exit()
# Reset: all data
tables_to_empty = [
......@@ -41,7 +37,6 @@ metadata = {
'source': 'string',
'volume': 'string',
'text': 'text',
'date': 'datetime',
'page': 'string',
'doi': 'string',
'journal': 'string',
......@@ -72,15 +67,23 @@ french = Language.objects.get(iso2='fr')
print('Initialize users...')
try:
me = User.objects.get(username='mat')
me = User.objects.get(username='alexandre')
except:
me = User(username='mat')
me = User(username='alexandre')
me.save()
# Integration: node types
print('Initialize node types...')
try:
typeProject = NodeType.objects.get(name='Root')
except Exception as error:
print(error)
typeProject = NodeType(name='Root')
typeProject.save()
try:
typeProject = NodeType.objects.get(name='Project')
except Exception as error:
......@@ -102,15 +105,59 @@ except Exception as error:
typeDoc = NodeType(name='Document')
typeDoc.save()
try:
typeStem = NodeType.objects.get(name='Stem')
except Exception as error:
print(error)
typeStem = NodeType(name='Stem')
typeStem.save()
try:
typeTfidf = NodeType.objects.get(name='Tfidf')
except Exception as error:
print(error)
typeTfidf = NodeType(name='Tfidf')
typeTfidf.save()
try:
typeDoc = NodeType.objects.get(name='WhiteList')
except Exception as error:
print(error)
typeDoc = NodeType(name='WhiteList')
typeDoc.save()
try:
typeDoc = NodeType.objects.get(name='BlackList')
except Exception as error:
print(error)
typeDoc = NodeType(name='BlackList')
typeDoc.save()
try:
typeDoc = NodeType.objects.get(name='Synonyme')
except Exception as error:
print(error)
typeDoc = NodeType(name='Synonyme')
typeDoc.save()
try:
typeDoc = NodeType.objects.get(name='Cooccurrence')
except Exception as error:
print(error)
typeDoc = NodeType(name='Cooccurrence')
typeDoc.save()
# Integration: resource types
print('Initialize resource...')
try:
typePubmed = ResourceType.objects.get(name='pubmed')
typeIsi = ResourceType.objects.get(name='isi')
typeRis = ResourceType.objects.get(name='ris')
typePresse = ResourceType.objects.get(name='europress')
typePubmed = ResourceType.objects.get(name='pubmed')
typeIsi = ResourceType.objects.get(name='isi')
typeRis = ResourceType.objects.get(name='ris')
typePresseFr = ResourceType.objects.get(name='europress_french')
typePresseEn = ResourceType.objects.get(name='europress_english')
except Exception as error:
print(error)
......@@ -124,8 +171,19 @@ except Exception as error:
typeRis = ResourceType(name='ris')
typeRis.save()
typePresse = ResourceType(name='europress')
typePresse.save()
typePresseFr = ResourceType(name='europress_french')
typePresseFr.save()
typePresseEn = ResourceType(name='europress_english')
typePresseEn.save()
# Integration: Node Stem
try:
stem = Node.objects.get(name='Stem')
except:
stem = Node(name='Stem', type=typeStem, user=me)
stem.save()
# Integration: project
......@@ -150,7 +208,8 @@ except:
print('Initialize resource...')
corpus_pubmed.add_resource(
# file='./data_samples/pubmed.zip',
file='./data_samples/pubmed_2013-04-01_HoneyBeesBeeBees.xml',
#file='./data_samples/pubmed_2013-04-01_HoneyBeesBeeBees.xml',
file='/srv/gargantext_lib/data_samples/pubmed.xml',
type=typePubmed,
user=me
)
......@@ -164,4 +223,4 @@ print('Extract corpus #%d...' % (corpus_pubmed.id, ))
corpus_pubmed.children.all().extract_ngrams(['title',])
print('Parsed corpus #%d.' % (corpus_pubmed.id, ))
exit()
\ No newline at end of file
exit()
......@@ -59,6 +59,42 @@ Populate the database
python manage.py syncdb
Last steps of configuration
---------------------------
1) If your project is not in /srv/gargantext:
ln -s [the project folder] /srv/gargantext
2) Build gargantext_lib:
cd /srv/
wget http://docs.delanoe.org/gargantext_lib.tar.bz2
sudo tar xvjf gargantext_lib.tar.bz2
sudo chown user:user /srv/gargantext_lib
3) Install the explorer:
cd /srv/gargantext_lib/js
git clone git@github.com:PkSM3/garg.git
4) Adapt all symlinks:
ln -s [your folder for tree tagger] [the project folder]/parsing/Tagger/treetagger
Warning: the paths given to ln have to be absolute!
5) Patch CTE:
patch /srv/gargantext_env/lib/python3.4/site-packages/cte_tree/models.py /srv/gargantext/init/cte_tree.models.diff
6) Init node types and main variables:
/srv/gargantext/manage.py shell < /srv/gargantext/init/init.py
7) DO NOT use the default aldjemy package:
cd /tmp
git clone https://github.com/mathieurodic/aldjemy
cd aldjemy
python3 setup.py install
Extras
=======
Last steps of configuration:
----------------------------
......@@ -90,9 +126,6 @@ patch /srv/gargantext_env/lib/python3.4/site-packages/cte_tree/models.py /srv/ga
/srv/gargantext/manage.py shell < /srv/gargantext/init/init.py
Extras:
======
Start the Python Notebook server
--------------------------------
......
......@@ -31,4 +31,4 @@ sudo apt-get install liblapack-dev
source /srv/gargantext_env/bin/activate
pip install git+https://github.com/mathieurodic/aldjemy.git
pip3 install git+https://github.com/mathieurodic/aldjemy.git
......@@ -26,9 +26,9 @@ french = Language.objects.get(iso2='fr')
try:
me = User.objects.get(username='alexandre')
me = User.objects.get(username='pksm3')
except:
me = User(username='alexandre')
me = User(username='pksm3')
me.save()
......@@ -61,6 +61,23 @@ except Exception as error:
typeDoc = NodeType(name='Document')
typeDoc.save()
try:
typeStem = NodeType.objects.get(name='Stem')
except Exception as error:
print(error)
typeStem = NodeType(name='Stem')
typeStem.save()
try:
typeTfidf = NodeType.objects.get(name='Tfidf')
except Exception as error:
print(error)
typeTfidf = NodeType(name='Tfidf')
typeTfidf.save()
try:
typeDoc = NodeType.objects.get(name='WhiteList')
except Exception as error:
......@@ -132,4 +149,12 @@ except:
project = Node(name='Bees project', type=typeProject, user=me)
project.save()
try:
stem = Node.objects.get(name='Stem')
except:
stem = Node(name='Stem', type=typeStem, user=me)
stem.save()
drop schema public cascade ;
create schema public ;
create extension hstore ;
create schema authorization gargantext;
-- create schema authorization gargantext;
......@@ -199,6 +199,7 @@ class Node(CTENode):
self.node_resource.update(parsed=True)
@current_app.task(filter=task_method)
def extract_ngrams(self, keys, ngramsextractorscache=None, ngramscaches=None):
# if there is no cache...
if ngramsextractorscache is None:
......@@ -234,11 +235,12 @@ class Node(CTENode):
])
@current_app.task(filter=task_method)
def parse_and_extract_ngrams(self, keys=None, ngramsextractorscache=None, ngramscaches=None, verbose=False):
def workflow(self, keys=None, ngramsextractorscache=None, ngramscaches=None, verbose=False):
self.parse_resources()
type_document = NodeType.objects.get(name='Document')
self.children.filter(type_id=type_document.pk).extract_ngrams(keys=['title',])
from analysis.functions import do_tfidf
do_tfidf(self)
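A hedged sketch of driving the renamed workflow method end to end, assuming a saved Corpus node with a resource attached (corpus.workflow.delay() would run it through Celery instead):

from node.models import Node, NodeType

corpus = Node.objects.filter(type=NodeType.objects.get(name='Corpus')).first()
corpus.workflow()  # parse_resources -> extract_ngrams(['title']) -> do_tfidf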
class Node_Metadata(models.Model):
node = models.ForeignKey(Node)
......@@ -295,7 +297,29 @@ class NodeNgramNgram(models.Model):
score = models.FloatField(default=0)
def __str__(self):
return "%s: %s / %s" % (self.node.name, self.ngramX.terms, self.ngramY.terms)
return "%s: %s / %s" % (self.node.name, self.ngramx.terms, self.ngramy.terms)
class NodeNodeNgram(models.Model):
nodex = models.ForeignKey(Node, related_name="nodex")
nodey = models.ForeignKey(Node, related_name="nodey")
ngram = models.ForeignKey(Ngram, on_delete=models.CASCADE)
score = models.FloatField(default=0)
def __str__(self):
return "%s: %s / %s = %s" % (self.nodex.name, self.nodey.name, self.ngram.terms, self.score)
......@@ -3,6 +3,13 @@ from parsing.NgramsExtractors import *
from collections import defaultdict
from nltk.stem.porter import PorterStemmer
st = PorterStemmer()
def stem_all(ngram):
return " ".join(map(lambda x: st.stem(x), ngram.split(" ")))
class NgramsCache(defaultdict):
"""This allows the fast retrieval of ngram ids
......@@ -16,12 +23,31 @@ class NgramsCache(defaultdict):
def __missing__(self, terms):
"""If the terms are not yet present in the dictionary,
retrieve them from the database or insert them."""
terms = terms.strip().lower()
try:
ngram = node.models.Ngram.get(terms=terms, language=self.language)
except:
ngram = node.models.Ngram.objects.get(terms=terms, language=self.language)
except Exception as error:
ngram = node.models.Ngram(terms=terms, n=len(terms.split()), language=self.language)
ngram.save()
stem_terms = stem_all(ngram.terms)
try:
stem = node.models.Ngram.objects.get(terms=stem_terms, language=ngram.language, n=ngram.n)
except:
stem = node.models.Ngram(terms=stem_terms, language=ngram.language, n=ngram.n)
stem.save()
type_stem = node.models.NodeType.objects.get(name='Stem')
node_stem = node.models.Node.objects.get(name='Stem', type=type_stem)
try:
node.models.NodeNgramNgram.objects.get(node=node_stem, ngramx=stem, ngramy=ngram)
except:
node.models.NodeNgramNgram(node=node_stem, ngramx=stem, ngramy=ngram, score=1).save()
self[terms] = ngram
return self[terms]
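What stem_all produces on a multi-word ngram, assuming NLTK's PorterStemmer behaves as usual:

from nltk.stem.porter import PorterStemmer

st = PorterStemmer()
print(" ".join(st.stem(w) for w in "cognitive sciences".split(" ")))
# -> "cognit scienc": both surface forms map onto one stem ngram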
......
......@@ -3,7 +3,7 @@ import dateutil.parser
import zipfile
import chardet
from parsing.Caches import LanguagesCache
from ..Caches import LanguagesCache
class FileParser:
......
from parsing.FileParsers.RisFileParser import RisFileParser
from .RisFileParser import RisFileParser
class IsiFileParser(RisFileParser):
......
from django.db import transaction
from lxml import etree
from parsing.FileParsers.FileParser import FileParser
from parsing.NgramsExtractors import *
from .FileParser import FileParser
from ..NgramsExtractors import *
class PubmedFileParser(FileParser):
......@@ -25,13 +25,20 @@ class PubmedFileParser(FileParser):
"publication_year" : 'MedlineCitation/DateCreated/Year',
"publication_month" : 'MedlineCitation/DateCreated/Month',
"publication_day" : 'MedlineCitation/DateCreated/Day',
"authors" : 'MedlineCitation/Article/AuthorList',
}
for key, path in metadata_path.items():
try:
node = xml_article.find(path)
metadata[key] = node.text
xml_node = xml_article.find(path)
if key == 'authors':
metadata[key] = ', '.join([
xml_author.find('ForeName').text + ' ' + xml_author.find('LastName').text
for xml_author in xml_node
])
else:
metadata[key] = xml_node.text
except:
metadata[key] = ""
pass
metadata_list.append(metadata)
# return the list of metadata
return metadata_list
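A self-contained check of the AuthorList handling above, on an inline Pubmed-like snippet with hypothetical author names:

from lxml import etree

xml_article = etree.fromstring(
    "<PubmedArticle><MedlineCitation><Article><AuthorList>"
    "<Author><LastName>Curie</LastName><ForeName>Marie</ForeName></Author>"
    "<Author><LastName>Pasteur</LastName><ForeName>Louis</ForeName></Author>"
    "</AuthorList></Article></MedlineCitation></PubmedArticle>")

xml_node = xml_article.find('MedlineCitation/Article/AuthorList')
print(', '.join(a.find('ForeName').text + ' ' + a.find('LastName').text
                for a in xml_node))
# -> Marie Curie, Louis Pasteur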
from django.db import transaction
from parsing.FileParsers.FileParser import FileParser
from .FileParser import FileParser
class RisFileParser(FileParser):
......
from parsing.NgramsExtractors.NgramsExtractor import NgramsExtractor
from parsing.Taggers import NltkTagger
from .NgramsExtractor import NgramsExtractor
from ..Taggers import NltkTagger
class EnglishNgramsExtractor(NgramsExtractor):
......
from parsing.Taggers.Tagger import Tagger
from .Tagger import Tagger
import nltk
......
from parsing.Taggers.Tagger import Tagger
from .Tagger import Tagger
import subprocess
import threading
......
# from NltkTagger import NltkTagger
# tagger = NltkTagger()
# text0 = "Forman Brown (1901–1996) was one of the world's leaders in puppet theatre in his day, as well as an important early gay novelist. He was a member of the Yale Puppeteers and the driving force behind Turnabout Theatre. He was born in Otsego, Michigan, in 1901 and died in 1996, two days after his 95th birthday. Brown briefly taught at North Carolina State College, followed by an extensive tour of Europe."
# text1 = "James Patrick (born c. 1940) is the pseudonym of a Scottish sociologist, which he used to publish a book A Glasgow Gang Observed. It attracted some attention in Scotland when it was published in 1973. It was based on research he had done in 1966, when he was aged 26. At that time he was working as a teacher in an Approved School, a Scottish reformatory. One gang member in the school, \"Tim Malloy\" (born 1950, also a pseudonym and a generic term for a Glasgow Catholic), agreed to infiltrate him into his gang in Maryhill in Glasgow. Patrick spent four months as a gang member, observing their behaviour."
from TreeTagger import TreeTagger
tagger = TreeTagger()
text0 = "La saison 1921-1922 du Foot-Ball Club Juventus est la vingtième de l'histoire du club, créé vingt-cinq ans plus tôt en 1897. La société turinoise qui fête cette année son 25e anniversaire prend part à l'édition du championnat dissident d'Italie de la CCI (appelé alors la Première division), la dernière édition d'une compétition annuelle de football avant l'ère fasciste de Mussolini."
text1 = "Le terme oblong désigne une forme qui est plus longue que large et dont les angles sont arrondis. En langage bibliographique, oblong signifie un format dont la largeur excède la hauteur. Ce qui correspond au format paysage en termes informatiques et \"à l'italienne\", pour l'imprimerie."
text2 = "Les sanglots longs des violons de l'automne bercent mon coeur d'une langueur monotone."
print()
print(tagger.tag_text(text0))
print()
print(tagger.tag_text(text1))
print()
print(tagger.tag_text(text2))
print()
\ No newline at end of file
from NgramsExtractors import *
from Taggers import *
#texts = [
# "This is quite a simple test.",
# "Forman Brown (1901–1996) was one of the world's leaders in puppet theatre in his day, as well as an important early gay novelist. He was a member of the Yale Puppeteers and the driving force behind Turnabout Theatre. He was born in Otsego, Michigan, in 1901 and died in 1996, two days after his 95th birthday. Brown briefly taught at North Carolina State College, followed by an extensive tour of Europe.",
# "James Patrick (born c. 1940) is the pseudonym of a Scottish sociologist, which he used to publish a book A Glasgow Gang Observed. It attracted some attention in Scotland when it was published in 1973. It was based on research he had done in 1966, when he was aged 26. At that time he was working as a teacher in an Approved School, a Scottish reformatory. One gang member in the school, \"Tim Malloy\" (born 1950, also a pseudonym and a generic term for a Glasgow Catholic), agreed to infiltrate him into his gang in Maryhill in Glasgow. Patrick spent four months as a gang member, observing their behaviour.",
#]
#tagger = NltkTagger()
#extractor = EnglishNgramsExtractor()
#
texts = [
"La saison 1921-1922 du Foot-Ball Club Juventus est la vingtième de l'histoire du club, créé vingt-cinq ans plus tôt en 1897. La société turinoise qui fête cette année son 25e anniversaire prend part à l'édition du championnat dissident d'Italie de la CCI (appelé alors la Première division), la dernière édition d'une compétition annuelle de football avant l'ère fasciste de Mussolini.",
"Le terme oblong désigne une forme qui est plus longue que large et dont les angles sont arrondis. En langage bibliographique, oblong signifie un format dont la largeur excède la hauteur. Ce qui correspond au format paysage en termes informatiques et \"à l'italienne\", pour l'imprimerie.",
"Les sanglots longs des violons de l'automne bercent mon coeur d'une langueur monotone.",
]
tagger = TreeTagger()
extractor = FrenchNgramsExtractor()
for text in texts:
print(tagger.tag_text(text))
print()
ngrams = extractor.extract_ngrams(text)
for ngram in ngrams:
print("\t" + str(ngram))
print("\n")
/*
AngularJS v1.2.28
(c) 2010-2014 Google, Inc. http://angularjs.org
License: MIT
*/
(function(p,f,n){'use strict';f.module("ngCookies",["ng"]).factory("$cookies",["$rootScope","$browser",function(e,b){var c={},g={},h,k=!1,l=f.copy,m=f.isUndefined;b.addPollFn(function(){var a=b.cookies();h!=a&&(h=a,l(a,g),l(a,c),k&&e.$apply())})();k=!0;e.$watch(function(){var a,d,e;for(a in g)m(c[a])&&b.cookies(a,n);for(a in c)d=c[a],f.isString(d)||(d=""+d,c[a]=d),d!==g[a]&&(b.cookies(a,d),e=!0);if(e)for(a in d=b.cookies(),c)c[a]!==d[a]&&(m(d[a])?delete c[a]:c[a]=d[a])});return c}]).factory("$cookieStore",
["$cookies",function(e){return{get:function(b){return(b=e[b])?f.fromJson(b):b},put:function(b,c){e[b]=f.toJson(c)},remove:function(b){delete e[b]}}}])})(window,window.angular);
//# sourceMappingURL=angular-cookies.min.js.map
......@@ -378,7 +378,7 @@ buttonAddDataset.click(function() {
/*
$('.tree').jstree({
'core' : {
'data' : {
......@@ -412,5 +412,5 @@ buttonAddDataset.click(function() {
}
},
});
// var graph = $('.graph-it').graphIt(640, 480);
*/
var graph = $('.graph-it').graphIt(640, 480);
......@@ -11,6 +11,10 @@
{% block content %}
<div id="loading" style="position:absolute; top:50%; left:40%; width:80px; display: none;" >
<img src="{% static "img/ajax-loader.gif" %}" alt="" />
</div>
<div class="container theme-showcase" role="main">
<div class="jumbotron">
......@@ -25,4 +29,11 @@
</div>
</div>
<script>
function showLoader(){
$('#loading').css('display', 'block');   // jQuery: setStyle is MooTools, not jQuery
$('#submit_btn').prop('disabled', true); // assumes the submit button id is submit_btn
};
</script>
{% endblock %}
......@@ -261,7 +261,7 @@
</div>
<!-- <div id="topPapers"></div> -->
<div id="topPapers"></div>
<div id="information"></div>
</div>
......