Commit 47c074ed authored by PkSM3

[UPDATE] automatic status-reloader until workflow is finished

parent ddfe5839
@@ -57,6 +57,7 @@ class WorkflowTracking:
             cursor = connection.cursor()
             try:
                 cursor.execute(the_query)
+                cursor.execute("COMMIT;")
             finally:
                 connection.close()
         except :
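The added `cursor.execute("COMMIT;")` flushes the status UPDATE out of the worker's open transaction so the polling endpoint (added further down), which reads from a separate database connection, can see the new value. A minimal sketch of the same idea with Django's transaction API, assuming a Django version with `transaction.atomic` (≥1.6); `update_status` is a hypothetical name, not the project's:

from django.db import connection, transaction

def update_status(the_query):
    # Sketch: transaction.atomic() commits when the block exits, which is
    # what the raw COMMIT above does by hand. Without a commit, the status
    # UPDATE can sit in an open transaction, invisible to the polling
    # endpoint's separate connection.
    with transaction.atomic():
        cursor = connection.cursor()
        try:
            cursor.execute(the_query)
        finally:
            cursor.close()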
@@ -43,9 +43,9 @@ def apply_workflow(corpus_id):
     ngram_workflow(corpus)
     #ngrams2miam(user_id=corpus.user_id, corpus_id=corpus_id)
-    update_state.processing_(corpus, "0")
     print("End of the Workflow for corpus %d" % (corpus_id))
+    update_state.processing_(corpus, "0")

 @shared_task
@@ -92,7 +92,9 @@ urlpatterns = patterns('',
     ############################################################################
     url(r'^tests/', include('tests.urls')),
+    url(r'^project/(\d+)/corpus/(\d+)/terms$', samtest.get_ngrams),
+    url(r'^api/corpus/(\d+)$', samtest.get_corpus_state),
     url(r'^test_cores$', samtest.get_cores)
 )
@@ -345,12 +345,14 @@ def corpus(request, project_id, corpus_id):
     type_doc_id = cache.NodeType['Document'].id
     number = session.query(func.count(Node.id)).filter(Node.parent_id==corpus_id, Node.type_id==type_doc_id).all()[0][0]
+    the_query = """ SELECT hyperdata FROM node_node WHERE id=%d """ % ( int(corpus_id) )
+    cursor = connection.cursor()
     try:
-        processing = corpus.hyperdata['Processing']
-    except Exception as error:
-        print(error)
-        processing = 0
-    print('corpus',corpus_id,' , processing', processing)
+        cursor.execute(the_query)
+        processing = cursor.fetchone()[0]["Processing"]
+    except:
+        processing = "Error"
     html = t.render(Context({
         'debug': settings.DEBUG,
@@ -140,7 +140,7 @@ def project(request, project_id):
             parent_id = project_id,
             type_id = cache.NodeType['Corpus'].id,
             language_id = language_id,
-            hyperdata = {'Processing' : 1,}
+            hyperdata = {'Processing' : "Parsing documents",}
         )
         session.add(corpus)
         session.commit()
@@ -49,7 +49,6 @@ def ngram_workflow(corpus, n=5000):
     compute_tfidf(corpus)
     # update_state.processing_(corpus, "OCCS local score")
     # compute_occs(corpus)
-    #corpus=session.query(Node).filter(Node.id==540420).first()
     #corpus=session.query(Node).filter(Node.id==559637).first()
@@ -269,42 +269,42 @@ class Node(CTENode):
             for ngram_text, weight in associations.items()
         ])

-    @current_app.task(filter=task_method)
-    def workflow(self, keys=None, ngramsextractorscache=None, ngramscaches=None, verbose=False):
-        import time
-        total = 0
-        print("LOG::TIME: In workflow() parse_resources()")
-        start = time.time()
-        self.hyperdata['Processing'] = 1
-        self.save()
-        self.parse_resources()
-        end = time.time()
-        total += (end - start)
-        print ("LOG::TIME:_ "+datetime.datetime.now().isoformat()+" parse_resources() [s]",(end - start))
-        print("LOG::TIME: In workflow() / parse_resources()")
-        start = time.time()
-        print("LOG::TIME: In workflow() extract_ngrams()")
-        print("\n- - - - - - - - - -")
-        type_document = NodeType.objects.get(name='Document')
-        self.children.filter(type_id=type_document.pk).extract_ngrams(keys=['title',])
-        end = time.time()
-        print("- - - - - - - - - - \n")
-        total += (end - start)
-        print ("LOG::TIME:_ "+datetime.datetime.now().isoformat()+" extract_ngrams() [s]",(end - start))
-        print("LOG::TIME: In workflow() / extract_ngrams()")
-        start = time.time()
-        print("In workflow() do_tfidf()")
-        from analysis.functions import do_tfidf
-        do_tfidf(self)
-        end = time.time()
-        total += (end - start)
-        print ("LOG::TIME:_ "+datetime.datetime.now().isoformat()+" do_tfidf() [s]",(end - start))
-        print("LOG::TIME: In workflow() / do_tfidf()")
-        print("In workflow() END")
-        self.hyperdata['Processing'] = 0
-        self.save()
+    # @current_app.task(filter=task_method)
+    # def workflow(self, keys=None, ngramsextractorscache=None, ngramscaches=None, verbose=False):
+    #     import time
+    #     total = 0
+    #     print("LOG::TIME: In workflow() parse_resources()")
+    #     start = time.time()
+    #     self.hyperdata['Processing'] = 1
+    #     self.save()
+    #     self.parse_resources()
+    #     end = time.time()
+    #     total += (end - start)
+    #     print ("LOG::TIME:_ "+datetime.datetime.now().isoformat()+" parse_resources() [s]",(end - start))
+    #     print("LOG::TIME: In workflow() / parse_resources()")
+    #     start = time.time()
+    #     print("LOG::TIME: In workflow() extract_ngrams()")
+    #     print("\n- - - - - - - - - -")
+    #     type_document = NodeType.objects.get(name='Document')
+    #     self.children.filter(type_id=type_document.pk).extract_ngrams(keys=['title',])
+    #     end = time.time()
+    #     print("- - - - - - - - - - \n")
+    #     total += (end - start)
+    #     print ("LOG::TIME:_ "+datetime.datetime.now().isoformat()+" extract_ngrams() [s]",(end - start))
+    #     print("LOG::TIME: In workflow() / extract_ngrams()")
+    #     start = time.time()
+    #     print("In workflow() do_tfidf()")
+    #     from analysis.functions import do_tfidf
+    #     do_tfidf(self)
+    #     end = time.time()
+    #     total += (end - start)
+    #     print ("LOG::TIME:_ "+datetime.datetime.now().isoformat()+" do_tfidf() [s]",(end - start))
+    #     print("LOG::TIME: In workflow() / do_tfidf()")
+    #     print("In workflow() END")
+    #     self.hyperdata['Processing'] = 0
+    #     self.save()

 class Node_Hyperdata(models.Model):
     node = models.ForeignKey(Node, on_delete=models.CASCADE)
@@ -45,19 +45,22 @@ class MedlineFetcher:
         query = query.replace(' ', '%20')
         eSearch = '%s/esearch.fcgi?db=%s&retmax=1&usehistory=y&term=%s' %(self.pubMedEutilsURL, self.pubMedDB, query)
-        eSearchResult = urlopen(eSearch)
-        data = eSearchResult.read()
-        root = etree.XML(data)
-        findcount = etree.XPath("/eSearchResult/Count/text()")
-        count = findcount(root)[0]
-        findquerykey = etree.XPath("/eSearchResult/QueryKey/text()")
-        queryKey = findquerykey(root)[0]
-        findwebenv = etree.XPath("/eSearchResult/WebEnv/text()")
-        webEnv = findwebenv(root)[0]
+        try:
+            eSearchResult = urlopen(eSearch)
+            data = eSearchResult.read()
+            root = etree.XML(data)
+            findcount = etree.XPath("/eSearchResult/Count/text()")
+            count = findcount(root)[0]
+            findquerykey = etree.XPath("/eSearchResult/QueryKey/text()")
+            queryKey = findquerykey(root)[0]
+            findwebenv = etree.XPath("/eSearchResult/WebEnv/text()")
+            webEnv = findwebenv(root)[0]
+        except:
+            count=0
+            queryKey=False
+            webEnv=False
+            origQuery=False

         values = { "query":origQuery , "count": int(str(count)), "queryKey": queryKey , "webEnv":webEnv }
         return values
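The new bare `except:` swallows everything, including programming errors. A narrower sketch (not the commit's code) that catches only network and XML-parse failures, assuming lxml and Python 3's urllib; `esearch_counts` is a hypothetical helper mirroring the diff's field names:

from urllib.request import urlopen
from urllib.error import URLError
from lxml import etree

def esearch_counts(eSearch):
    # Catch network and parse errors explicitly so unrelated bugs still
    # surface instead of being masked by a bare except.
    try:
        root = etree.XML(urlopen(eSearch).read())
        count = root.findtext("Count")
        queryKey = root.findtext("QueryKey")
        webEnv = root.findtext("WebEnv")
    except (URLError, etree.XMLSyntaxError) as error:
        print("eSearch failed:", error)
        count, queryKey, webEnv = 0, False, False
    return {"count": int(count or 0), "queryKey": queryKey, "webEnv": webEnv}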
@@ -173,8 +176,13 @@ class MedlineFetcher:
         self.q.join()
         print('time:',time.perf_counter() - start)

+        Total = 0
+        Fails = 0
         for globalresults in self.firstResults:
             # globalresults = self.medlineEsearch(pubmedquery)
+            Total += 1
+            if globalresults["queryKey"]==False:
+                Fails += 1
             if globalresults["count"]>0:
                 N+=globalresults["count"]
                 queryhyperdata = {
@@ -198,4 +206,7 @@ class MedlineFetcher:
             if query["retmax"]==0: query["retmax"]+=1
             print(query["string"],"\t[",k,">",query["retmax"],"]")
+
+        if ((Fails+1)/(Total+1))==1 : # for identifying the epic fail or connection error
+            thequeries = [False]
         return thequeries
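The ratio test above is an indirect equality check: (Fails+1)/(Total+1) == 1 holds exactly when Fails == Total, i.e. when every sub-query failed. A hypothetical helper spelling that out:

def all_failed(fails, total):
    # (fails + 1) / (total + 1) == 1 exactly when fails == total; the +1
    # also covers total == 0, where the original evaluates 1/1 and still
    # reports a connection error.
    return fails == total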
@@ -130,7 +130,7 @@ def doTheQuery(request , project_id):
             parent_id = project_id,
             type_id = cache.NodeType['Corpus'].id,
             language_id = None,
-            hyperdata = {'Processing' : 1,}
+            hyperdata = {'Processing' : "Parsing documents",}
         )
         session.add(corpus)
         session.commit()
@@ -243,7 +243,7 @@ def testISTEX(request , project_id):
             parent_id = project_id,
             type_id = cache.NodeType['Corpus'].id,
             language_id = None,
-            hyperdata = {'Processing' : 1,}
+            hyperdata = {'Processing' : "Parsing documents",}
         )
         session.add(corpus)
         session.commit()
@@ -85,7 +85,8 @@
             </div>
         </div>
         <span style="display:none;" id="process_state">{{processing}}</span>
+        <span style="display:none;" id="corpus_id">{{corpus.id}}</span>
         <div class="col-md-6">
             <div class="jumbotron">
             {% if processing == 0 or processing == "0" %}
@@ -96,8 +97,9 @@
                 <li>Authors and Terms</li>
             </ol>
             {% else %}
             <h3><img width="20px" src="{% static "js/libs/img2/loading-bar.gif" %}"></img> Networks </h3>
-            <h6>(Updating: <i>{{processing}}</i>)</h6>
+            <h6>(Updating: <i id="process_id" data-since="date" >{{processing}}</i>)</h6>
             <ol>
             <li>Terms</li>
             <li>Journals and Terms</li>
@@ -134,6 +136,35 @@
             return window.open(url_,'_blank');
         }

+        var refresh_time = 3000 //ms
+        function corpus_monitorer() {
+            var url_ = "/api/corpus/"+$("#corpus_id").text()
+            $.ajax({
+                type: "GET",
+                url: url_,
+                dataType: "json",
+                success : function(data, textStatus, jqXHR) {
+                    if(data["Processing"]=="0") {
+                        window.location.reload()
+                    } else {
+                        $("#process_id").html(data["Processing"]+"...")
+                    }
+                },
+                error: function(exception) {
+                    console.log("exception!:"+exception.status)
+                }
+            });
+        }
+
+        if( $("#process_state").text()=="0" ) {
+            // workflow : finished!
+        } else {
+            setInterval(corpus_monitorer ,refresh_time);
+        }
     </script>
@@ -24,26 +24,6 @@
     <script type="text/javascript">

-        var refresh_time = 5000 //ms
-        function corpus_monitorer() {
-            console.log("hola")
-            // $.ajax({
-            //     type: "GET",
-            //     url: "https://dl.dropboxusercontent.com/u/9975992/climat/ajax_file.json",
-            //     dataType: "json",
-            //     success : function(data, textStatus, jqXHR) {
-            //         if( data.command ) {
-            //             eval( data.command )
-            //         }
-            //     },
-            //     error: function(exception) {
-            //         console.log("exception!:"+exception.status)
-            //     }
-            // });
-        }
-        setInterval(corpus_monitorer ,refresh_time);
-
     </script>
@@ -363,6 +343,7 @@
                 xhr.setRequestHeader("X-CSRFToken", getCookie("csrftoken"));
             },
             success: function(data) {
+                console.log("SUCCESS")
                 console.log("in getGlobalResults")
                 console.log(data)
                 console.log("enabling "+"#"+value.id)
@@ -379,12 +360,15 @@
                     $('#submit_thing').prop('disabled', false);
                 } else {
                     $("#theresults").html("<i><b>"+pubmedquery+"</b>: No results!</i><br>")
+                    if(data[0]==false)
+                        $("#theresults").html("<i>Pubmed connection error!</i><br>")
                     $('#submit_thing').prop('disabled', true);
                 }
             },
             error: function(result) {
                 console.log("Data not found");
+                $("#theresults").html("<i>Pubmed connection error!</i><br>")
                 $('#submit_thing').prop('disabled', true);
             }
         });
     }
@@ -166,4 +166,19 @@ def get_corpuses( request , node_ids ):
 def get_cores( request ):
     import multiprocessing
     cpus = multiprocessing.cpu_count()
-    return JsonHttpResponse( {"data":cpus} )
\ No newline at end of file
+    return JsonHttpResponse( {"data":cpus} )
+
+def get_corpus_state( request , corpus_id ):
+    if not request.user.is_authenticated():
+        return JsonHttpResponse( {"request" : "forbidden"} )
+    processing = ["Waiting"]
+    the_query = """ SELECT hyperdata FROM node_node WHERE id=%d """ % ( int(corpus_id) )
+    cursor = connection.cursor()
+    try:
+        cursor.execute(the_query)
+        processing = cursor.fetchone()[0]
+    finally:
+        connection.close()
+    # processing = corpus.hyperdata['Processing']
+    return JsonHttpResponse( processing )
\ No newline at end of file
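The endpoint builds its SQL with `%d` string formatting; `int()` constrains the value, but parameter binding is the idiomatic guard, and closing the shared connection each request is unnecessary. A hardened sketch (not the commit's code), assuming Django's raw-cursor API and that `hyperdata` comes back as a dict:

from django.db import connection

def get_corpus_state_sketch(corpus_id):
    # Let the driver bind corpus_id rather than %-formatting it into the
    # SQL string, and close only the cursor, keeping the connection usable.
    cursor = connection.cursor()
    try:
        cursor.execute("SELECT hyperdata FROM node_node WHERE id = %s",
                       [int(corpus_id)])
        row = cursor.fetchone()
        return row[0] if row else {"Processing": "Error"}
    finally:
        cursor.close()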