Commit 47d208e1 authored by Mathieu Rodic

[FEATURE] Added new tagger with TurboParser

parent f3f4458d
@@ -397,6 +397,10 @@ class NodesChildrenQueries(APIView):
         # return value
         return field, _operators[operator], value
+    def _count_documents(self, query):
+        return {
+            'fields': []
+        }
     def post(self, request, node_id):
         """ Query the children of the given node.
...
nlpserver @ 02c30fd9
Subproject commit 02c30fd9c700c4f8a623479bb24821ff8b32d57c
@@ -31,14 +31,14 @@ class NgramsExtractor:
         tagged_ngrams = self.tagger.tag_text(contents)
         grammar = nltk.RegexpParser(self._rule)
         result = []
-        try:
+        # try:
         grammar_parsed = grammar.parse(tagged_ngrams)
         for subtree in grammar_parsed.subtrees():
             if subtree.label() == self._label:
                 result.append(subtree.leaves())
-        except:
-            print("Problem while parsing rule '%s'" % (self._rule, ))
-            pass
+        # except Exception as e:
+        #     print("Problem while parsing rule '%s'" % (self._rule, ))
+        #     print(e)
         return result
...
 from parsing.NgramsExtractors.FrenchNgramsExtractor import FrenchNgramsExtractor
-from parsing.NgramsExtractors.EnglishNgramsExtractor import EnglishNgramsExtractor
+from parsing.NgramsExtractors.TurboNgramsExtractor import TurboNgramsExtractor as EnglishNgramsExtractor
+# from parsing.NgramsExtractors.EnglishNgramsExtractor import EnglishNgramsExtractor
 from parsing.NgramsExtractors.NgramsExtractor import NgramsExtractor
 from parsing.Taggers.Tagger import Tagger
 from parsing.Taggers.NltkTagger import NltkTagger
 from parsing.Taggers.TreeTagger import TreeTagger
+from parsing.Taggers.TurboTagger import TurboTagger
GETTING STARTED
===============
* Download the following files (if all you need is tagging, the second
archive is not necessary):
- http://www.ark.cs.cmu.edu/TurboParser/sample_models/english_proj_tagger.tar.gz
- http://www.ark.cs.cmu.edu/TurboParser/sample_models/english_proj_parser.tar.gz
* Extract them, and place the extracted files in the `data` directory.
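
If you prefer to script these two steps, here is a minimal sketch using only
the Python standard library; it assumes the model files should land directly
under `data`, as the settings expect (URLs are copied from the list above):

import os
import tarfile
import urllib.request

urls = [
    'http://www.ark.cs.cmu.edu/TurboParser/sample_models/english_proj_tagger.tar.gz',
    'http://www.ark.cs.cmu.edu/TurboParser/sample_models/english_proj_parser.tar.gz',
]
os.makedirs('data', exist_ok=True)
for url in urls:
    # Download to a temporary location, then unpack into `data`
    archive, _ = urllib.request.urlretrieve(url)
    with tarfile.open(archive, 'r:gz') as f:
        f.extractall('data')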
CONFIGURATION
=============
The settings for the server can be found in `settings.py`.
Please ensure the TCP port is not already in use on your machine, and that the paths to the models are correct.
START/STOP THE SERVER
=====================
Simply run the following command to start: `./nlpserver start`
To stop: `./nlpserver stop`
If the server fails to start, have a look at the log in `nlpserver.log`.
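
Once the server is up, a quick end-to-end check can be done with the
`NLPClient` class from `client.py` below. A minimal sketch, assuming the
default host/port from `settings.py`; the import path is hypothetical and
depends on where the package lives:

from nlpserver.client import NLPClient  # hypothetical import path

client = NLPClient()
for sentence in client.tag('The cat sat on the mat.', asdict=True):
    for token in sentence:
        print(token['token'], token['tag'])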
import socket
import sys
import re

from .settings import server_type_client, server_host, server_port, server_buffer
from .settings import implemented_methods


class NLPClient:

    def __init__(self):
        self._socket = socket.socket(*server_type_client)
        self._socket.connect((server_host, server_port))
        # Disable the public methods the server does not implement
        for method_name in dir(self):
            if method_name[0] != '_':
                if method_name.upper() not in implemented_methods:
                    setattr(self, method_name, self._notimplemented)

    def __del__(self):
        self._socket.close()

    def _notimplemented(self, *args, **kwargs):
        raise NotImplementedError(
            'Only the following methods are allowed: {}'.format(
                ', '.join(implemented_methods)
            )
        )

    def _getline(self):
        """Get one line of text from the buffer
        """
        buf = self._socket.recv(server_buffer).decode()
        done = False
        while not done:
            if '\n' in buf:
                line, buf = buf.split('\n', 1)
                yield line
            else:
                more = self._socket.recv(server_buffer).decode()
                if not more:
                    done = True
                else:
                    buf += more
        if buf:
            yield buf

    def _request(self, action, text, language, keys=None):
        """Generic method to request info from the server
        """
        data = action + ' '
        data += language + '\n'
        data += re.sub(r'\n+', '\n', text)
        data += '\n\n'
        # Reconnect: the server closes the connection after each request
        self.__init__()
        self._socket.sendall(data.encode())
        sentence = []
        if keys is None:
            for line in self._getline():
                if not line:
                    if not sentence:
                        break
                    yield sentence
                    sentence = []
                    continue
                sentence.append(line.split('\t'))
        else:
            for line in self._getline():
                if not line:
                    if not sentence:
                        break
                    yield sentence
                    sentence = []
                    continue
                values = line.split('\t')
                sentence.append(dict(zip(keys, values)))
        self.__del__()

    def tokenize(self, text, language='english', asdict=False):
        keys = ('token', ) if asdict else None
        return self._request('TOKENIZE', text, language, keys)

    def tag(self, text, language='english', asdict=False):
        keys = ('token', 'tag', ) if asdict else None
        return self._request('TAG', text, language, keys)

    def lemmatize(self, text, language='english', asdict=False):
        keys = ('token', 'tag', 'lemma') if asdict else None
        return self._request('LEMMATIZE', text, language, keys)

    def parse(self, text, language='english', asdict=False):
        keys = ('token', 'tag', 'lemma', 'head', 'deprel', ) if asdict else None
        return self._request('PARSE', text, language, keys)


# Benchmark when the script is called directly
if __name__ == '__main__':
    from time import time

    text = """Current therapeutics for schizophrenia, the typical and atypical antipsychotic class of drugs, derive their therapeutic benefit predominantly by antagonism of the dopamine D2 receptor subtype and have robust clinical benefit on positive symptoms of the disease with limited to no impact on negative symptoms and cognitive impairment. Driven by these therapeutic limitations of current treatments and the recognition that transmitter systems beyond the dopaminergic system in particular glutamatergic transmission contribute to the etiology of schizophrenia significant recent efforts have focused on the discovery and development of novel treatments for schizophrenia with mechanisms of action that are distinct from current drugs. Specifically, compounds selectively targeting the metabotropic glutamate receptor 2/3 subtype, phosphodiesterase subtype 10, glycine transporter subtype 1 and the alpha7 nicotinic acetylcholine receptor have been the subject of intense drug discovery and development efforts. Here we review recent clinical experience with the most advanced drug candidates targeting each of these novel mechanisms and discuss whether these new agents are living up to expectations."""
    text = open('/home/mat/projects/parser/animal-farm.txt').read()

    client = NLPClient()
    iterations = int(sys.argv[1]) if len(sys.argv) > 1 else 1
    for asdict in (False, True):
        print()
        print('Retrieving results as ' + (
            'dict' if asdict else 'list'
        ) + 's')
        print('---------------------------')
        for method_name in dir(client):
            if method_name[0] != '_':
                method = getattr(client, method_name)
                print('%-16s' % method_name, end='')
                t0 = time()
                n = 0.0
                for i in range(0, iterations):
                    try:
                        for sentence in method(text, asdict=asdict):
                            n += 1.0
                        t = time() - t0
                        print('%8.2f s %8.2f ms per sentence' % (t, 1000*t/n if n else 0.0))
                    except NotImplementedError:
                        print('(not implemented)')
        print()

# lemmatize      2.89 s     1.76 ms per sentence
# parse         25.21 s    15.37 ms per sentence
# tag            2.90 s     1.77 ms per sentence
# tokenize       0.19 s     0.12 ms per sentence
*.model
\ No newline at end of file
from nltk.stem import WordNetLemmatizer
from collections import defaultdict

lemmatizer = WordNetLemmatizer()
_lemmatize = lemmatizer.lemmatize

# Map the first letter of a Penn Treebank tag to a WordNet POS
# ('a' = adjective, 'n' = noun, 'v' = verb); anything else maps to ''
tags_translate = defaultdict(str)
tags_translate.update({
    'J': 'a',
    'N': 'n',
    'V': 'v',
})

def lemmatize(token, tag):
    tag_type = tags_translate[tag[0]]
    return _lemmatize(token, tag_type) if tag_type else token
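
In other words, only adjective, noun and verb tags reach WordNet; every other
token passes through unchanged. A short illustration (the example words are
ours, not from the source):

from lemmatizer import lemmatize

print(lemmatize('mice', 'NNS'))     # 'mouse'  ('N' -> WordNet noun)
print(lemmatize('running', 'VBG'))  # 'run'    ('V' -> WordNet verb)
print(lemmatize('the', 'DT'))       # 'the'    ('D' has no mapping)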
#!/bin/bash

# In case this bash file is placed in another directory (e.g., /etc/init.d),
# the following line should be changed to an absolute path
DAEMON_DIR=$( cd "$(dirname "${BASH_SOURCE[0]}")" && pwd )
DAEMON_SCRIPT=$DAEMON_DIR/server.py
DAEMON_NAME=nlpserver
DAEMON_ARGS=
echo $DAEMON_SCRIPT
# DAEMON_USER=root

# The process ID of the script when it runs is stored here:
DAEMON_PID=/tmp/$DAEMON_NAME.pid

. /lib/lsb/init-functions

do_start () {
    log_daemon_msg "Starting system '$DAEMON_NAME' daemon..."
    start-stop-daemon --start --quiet \
        --make-pidfile --pidfile $DAEMON_PID --background \
        --startas /bin/bash -- -c "python3 $DAEMON_SCRIPT $DAEMON_ARGS > /tmp/$DAEMON_NAME.log 2>&1"
        # --exec $DAEMON_SCRIPT \
        # --user $DAEMON_USER --chuid $DAEMON_USER
    log_end_msg $?
}

do_stop () {
    log_daemon_msg "Stopping system '$DAEMON_NAME' daemon..."
    start-stop-daemon --stop --pidfile $DAEMON_PID --retry 10
    log_end_msg $?
}

case "$1" in
    start|stop)
        do_${1}
        ;;
    restart|reload|force-reload)
        do_stop
        do_start
        ;;
    status)
        status_of_proc -p "$DAEMON_PID" "$DAEMON_SCRIPT" "$DAEMON_NAME" && exit 0 || exit $?
        ;;
    *)
        echo "Usage: $DAEMON_NAME {start|stop|restart|status}"
        exit 1
        ;;
esac

exit 0
\ No newline at end of file
from settings import *
from sys import stderr


# Log to stderr, so stdout stays clean (this module runs inside a daemon)
def print(text):
    stderr.write(text + '\n')


print('PREPARING TURBOPARSER')
import turboparser
turbo_interface = turboparser.PTurboParser()

print('LOADING TOKENIZERS')
import nltk
sentence_tokenizer = nltk.data.load(tokenizer_model)
word_tokenizer = nltk.TreebankWordTokenizer()

if 'TAG' in implemented_methods or 'LEMMATIZE' in implemented_methods:
    print('LOADING TAGGER')
    tagger = turbo_interface.create_tagger()
    tagger.load_tagger_model(b_tagger_model)

if 'LEMMATIZE' in implemented_methods or 'TAG' in implemented_methods or 'PARSE' in implemented_methods:
    print('LOADING LEMMATIZER')
    from lemmatizer import lemmatize

if 'PARSE' in implemented_methods:
    print('LOADING PARSER')
    parser = turbo_interface.create_parser()
    parser.load_parser_model(b_parser_model)


def split_sentences(text):
    return sentence_tokenizer.tokenize(text)

def tokenize(sentence):
    return word_tokenizer.tokenize(sentence)

def tag_sentence(sentence):
    # Write tokens to input file
    f_input = open(tmp_input_path, 'w')
    for token in tokenize(sentence):
        f_input.write(token + '\t_\n')
    f_input.close()
    # Tag tokens
    tagger.tag(b_tmp_input_path, b_tmp_output_path)
    # Iterate through tagged tokens
    f_output = open(tmp_output_path)
    for line in f_output:
        line = line.rstrip('\n')
        if line == '':
            continue
        token, tag = line.split('\t')
        yield (token, tag)
    f_output.close()

def tag_lemmatize_sentence(sentence):
    # Write tokens to input file
    f_input = open(tmp_input_path, 'w')
    for token in tokenize(sentence):
        f_input.write(token + '\t_\n')
    f_input.close()
    # Tag tokens
    tagger.tag(b_tmp_input_path, b_tmp_output_path)
    # Iterate through tagged tokens
    f_output = open(tmp_output_path)
    for line in f_output:
        line = line.rstrip('\n')
        if line == '':
            continue
        token, tag = line.split('\t')
        lemma = lemmatize(token, tag)
        yield (token, tag, lemma)
    f_output.close()

def parse_sentence(sentence):
    # Write tagged and lemmatized tokens to the input file
    f_input = open(tmp_input_path, 'w')
    i = 0
    for token, tag, lemma in tag_lemmatize_sentence(sentence):
        i += 1
        f_input.write(
            # position
            str(i) + '\t' +
            # token
            token + '\t' +
            # lemma
            lemma + '\t' +
            # tag (twice)
            tag + '\t' +
            tag + '\t' +
            # filler
            '_\t_\t_\n'
        )
    f_input.close()
    # Parse sentence
    parser.parse(b_tmp_input_path, b_tmp_output_path)
    # Iterate through the parsed rows
    f_output = open(tmp_output_path)
    for line in f_output:
        line = line.rstrip('\n')
        if line == '':
            continue
        fields = line.split('\t')
        token = fields[1]
        lemma = fields[2]
        tag = fields[3]
        head = str(int(fields[6]) - 1)
        deprel = fields[7]
        # Yield all five fields, matching the keys the client expects for PARSE
        yield (token, tag, lemma, head, deprel)
    f_output.close()
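
For reference, the temporary files exchanged with TurboParser use a CoNLL-like
tab-separated layout. A sketch of how one parser output row maps to the fields
read back above (the example row and values are ours):

# One line of the parser's output file, as consumed by parse_sentence()
line = '2\tcat\tcat\tNN\tNN\t_\t3\tnsubj'
fields = line.split('\t')
token  = fields[1]                # 'cat'
lemma  = fields[2]                # 'cat'
tag    = fields[3]                # 'NN'
head   = str(int(fields[6]) - 1)  # '2': 0-based index of the head token
deprel = fields[7]                # 'nsubj'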
#!/usr/bin/env python3

import pipeline
import socketserver

from settings import server_type_server, server_host, server_port, server_timeout
from settings import b_implemented_methods


actions = {
    b'TAG': pipeline.tag_sentence,
    b'LEMMATIZE': pipeline.tag_lemmatize_sentence,
    b'PARSE': pipeline.parse_sentence,
}


class NLPServer(socketserver.StreamRequestHandler):

    def handle(self):
        # What kind of request are we handling?
        firstline = self.rfile.readline()
        parameters = firstline.split()
        if len(parameters) != 2:
            self.wfile.write(b'\n\n')
            return
        action, language = parameters
        if action not in b_implemented_methods:
            self.wfile.write(b'\n\n')
            return
        # Get the text data
        text = ''
        while True:
            line = self.rfile.readline().decode()
            if not line.strip():
                break
            text += line
        text += '\n'
        # Execute the action
        method = actions.get(action, None)
        if method is None:
            # No dedicated handler: fall back to plain tokenization
            for sentence in pipeline.split_sentences(text):
                for token in pipeline.tokenize(sentence):
                    self.wfile.write(token.encode() + b'\n')
                self.wfile.write(b'\n')
            self.wfile.write(b'\n')
        else:
            for sentence in pipeline.split_sentences(text):
                for row in method(sentence):
                    self.wfile.write('\t'.join(row).encode() + b'\n')
                self.wfile.write(b'\n')
            self.wfile.write(b'\n')

    def handle_timeout(self):
        self.request.sendall(b'\n\n')


if __name__ == '__main__':
    print('STARTING TCP SERVER')
    server = server_type_server((server_host, server_port), NLPServer)
    server.timeout = server_timeout
    try:
        server.serve_forever()
    except (KeyboardInterrupt, SystemExit):
        print('STOPPING TCP SERVER')
        server.shutdown()
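
The wire protocol is plain text over TCP: a first line of `ACTION language`,
then the text, then a blank line; the reply is one tab-separated row per token,
a blank line after each sentence, and the connection is closed once the reply
is complete. A hand-rolled sketch, assuming the default host and port from
settings.py:

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', 1234))
sock.sendall(b'TAG english\nThe cat sat on the mat.\n\n')
reply = b''
while True:
    chunk = sock.recv(4096)
    if not chunk:  # the server closes the connection when it is done
        break
    reply += chunk
sock.close()
print(reply.decode())  # 'token\ttag' lines, blank line per sentence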
import os
import socket
import socketserver
# Server parameters
server_host = 'localhost'
server_port = 1234
server_type_server = socketserver.TCPServer
server_type_client = socket.AF_INET, socket.SOCK_STREAM
server_timeout = 2.0
server_buffer = 4096
# Implemented methods (others are treated as 'TOKENIZE')
implemented_methods = {'TOKENIZE', 'TAG', 'LEMMATIZE'}
# server_methods = {'TOKENIZE', 'TAG', 'LEMMATIZE', 'PARSE'}
b_implemented_methods = {name.encode() for name in implemented_methods}
# Models
data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
tokenizer_model = os.path.join(data_dir, 'english.pickle')
tagger_model = os.path.join(data_dir, 'english_proj_tagger.model')
# parser_model = 'data/210basic_sd330'
parser_model = os.path.join(data_dir, 'english_proj_parser_pruned-true_model-full.model')
b_tagger_model = tagger_model.encode()
b_parser_model = parser_model.encode()
# Temporary files access
tmp_input_path = '/tmp/nlpserver_input.tmp'
tmp_output_path = '/tmp/nlpserver_output.tmp'
b_tmp_input_path = tmp_input_path.encode()
b_tmp_output_path = tmp_output_path.encode()
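
To expose parsing as well, switch to the commented variant of
`implemented_methods` above; note from the benchmark figures in client.py that
parsing costs roughly an order of magnitude more per sentence than tagging:

# In settings.py: also expose PARSE (the commented alternative above)
implemented_methods = {'TOKENIZE', 'TAG', 'LEMMATIZE', 'PARSE'}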