Commit 91602c2f authored by Mathieu Rodic

[FEAT] made the MElt tagger independent from a separate software installation

https://forge.iscpif.fr/issues/1500
parent 4cead2ac
@@ -52,19 +52,19 @@ class MeltTagger(Tagger):
     def start(self, language='fr', melt_data_path='melttagger'):
         basepath = os.path.dirname(os.path.realpath(__file__))
-        path = os.path.join(basepath, melt_data_path, language)
+        path = os.path.join(basepath, melt_data_path)
         self._pos_tagger = POSTagger()
-        self._pos_tagger.load_tag_dictionary('%s/tag_dict.json' % path)
-        self._pos_tagger.load_lexicon('%s/lexicon.json' % path)
-        self._pos_tagger.load_model('%s' % path)
+        self._pos_tagger.load_tag_dictionary('%s/%s/tag_dict.json' % (path, language))
+        self._pos_tagger.load_lexicon('%s/%s/lexicon.json' % (path, language))
+        self._pos_tagger.load_model('%s/%s' % (path, language))
         self._preprocessing_commands = (
-            # ('/usr/local/bin/clean_noisy_characters.sh', ),
-            # ('/usr/local/bin/MElt_normalizer.pl', '-nc', '-c', '-d', '/usr/local/share/melt/normalization/%s' % language, '-l', language, ),
-            ('/usr/local/share/melt/segmenteur.pl', '-a', '-ca', '-af=/usr/local/share/melt/pctabr', '-p', 'r'),
+            ('%s/MElt_normalizer.pl' % path, '-nc', '-c', '-d', '%s/%s' % (path, language), '-l', language, ),
+            ('%s/segmenteur.pl' % path, '-a', '-ca', '-af=%s/pctabr' % path, '-p', 'r'),
         )
         self._lemmatization_commands = (
-            ('/usr/local/bin/MElt_postprocess.pl', '-npp', '-l', language),
-            ('MElt_lemmatizer.pl', '-m', '/usr/local/share/melt/%s' % language),
+            ('%s/MElt_postprocess.pl' % path, '-npp', '-l', language),
+            ('%s/MElt_lemmatizer.pl' % path, '-m', '%s/%s' % (path, language)),
         )
     def stop(self):
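With this hunk, the MElt model data (tag_dict.json, lexicon.json, the model files) and the helper scripts (MElt_normalizer.pl, segmenteur.pl, MElt_postprocess.pl, MElt_lemmatizer.pl) are all resolved under melt_data_path, next to the tagger module, instead of under /usr/local; the scripts themselves are added below in this commit. A minimal usage sketch under that assumption, showing only the two methods visible in the hunk (the import path is hypothetical):

    from parsing.Taggers import MeltTagger  # hypothetical import path

    tagger = MeltTagger()
    tagger.start(language='fr', melt_data_path='melttagger')  # data and scripts found next to the module
    # ... tag text through whatever interface the Tagger base class exposes ...
    tagger.stop()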
#!/usr/bin/perl
use utf8;
use locale;
binmode STDIN, ":utf8";
binmode STDOUT, ":utf8";
binmode STDERR, ":utf8";
use DBI;
use Encode;
my $datadir = ".";
my $language = "";
my $model = "";
my $lexfile = "";
my $it_mapping = 0;
my $flag_unknowns = "*";
my $verbose = 0;
my $multiple_lemmas = 0;
my $silent = 0;
while (1) {
$_ = shift;
if (/^-l$/) {$language = shift;}
elsif (/^-m$/) {$model = shift;}
elsif (/^-nv$/) {$silent = 1;}
elsif (/^-l?db$/) {$dbfile = shift;}
elsif (/^-nfu$/) {$flag_unknowns = "";}
elsif (/^-v$/) {$verbose = 1;}
elsif (/^-itmapping$/) {$it_mapping = 1;}
elsif (/^-lcl$/) {$lower_case_lemmas = 1;}
elsif (/^-ml$/) {$multiple_lemmas = 1;}
elsif (/^-h$/ || /^--?help$/) {
print STDERR <<END;
Usage: MElt_lemmatizer.pl [ -l language | -m model | -lex lexicon ] [ -nfu ] [ -itmapping ] [ -lcl ] < input > output
Input: POS-tagged text in Brown format. The text *must* have been tagged using MElt, as this lemmatizer is based
on the (external) lexicon used by a particular MElt model and on the tags assigned by MElt using this model
Brown format: word1/pos1 word2/pos2 ... wordn/posn (newline = new sentence)
Output: word1/pos1/lemma1 word2/pos2/lemma2 ... wordn/posn/lemman (newline = new sentence; lemmas for words
unknown to the lexicon are prefixed with '*')
Options:
-l language Use the lexicon of the default MElt model for language 'language'
-m model Use the lexicon of the MElt model to be found in the directory 'model'
-lex lexicon Use the lexicon provided
-v Verbose (outputs information about the options used on STDERR before lemmatizing)
-nfu Do not prefix lemmas for forms unknown to the lexicon with the character '*'
-lcl Output all lemmas in lowercase
-itmapping Triggers special conversion and adaptation rules for Italian
-h Print this
END
exit(0);
}
elsif (/^$/) {last}
}
if ($language eq "it") {$it_mapping = 1}
if ($dbfile eq "") {
if ($model ne "") {
if ($language ne "") {
die "Error: options -l and -m can not be used simultaneously";
}
} else {
if ($language eq "") {
$language = "fr";
}
$model = $datadir."/".$language;
}
$dbfile = $model."/lemmatization_data.db";
} else {
if ($language ne "" || $model ne "") {
die "Error: option -lex can not be used with options -l or -m";
}
}
if ($verbose) {
print STDERR "Lemmatization database used: $dbfile\n";
if ($flag_unknowns eq "") {
print STDERR "Lemmas for forms unknown to the lexicon are not prefixed by any special character\n" ;
} else {
print STDERR "Lemmas for forms unknown to the lexicon are prefixed with the character '$flag_unknowns'\n" ;
}
print STDERR "Lemmas are lowercased\n" if ($lower_case_lemmas);
print STDERR "Special mappings for Italian activated\n" if ($it_mapping);
}
my $dbh = DBI->connect("dbi:SQLite:$dbfile", "", "", {RaiseError => 1, AutoCommit => 0});
my $sth_cfl=$dbh->prepare('select lemma from cat_form2lemma where cat=? and form=?');
my $sth_cfslsc1=$dbh->prepare('select lemmasuff from cat_formsuff_lemmasuff2count where cat=? and formsuff=? limit 1');
my $sth_cfslsc2=$dbh->prepare('select lemmasuff from cat_formsuff_lemmasuff2count where cat=? and formsuff=? order by count desc limit 1');
my $sth_cfslsc3=$dbh->prepare('select lemmasuff from cat_formsuff_lemmasuff2count where cat=? and formsuff=?');
%equiv = (
"--RBR--" => ")",
"--LBR--" => "(",
"--RRB--" => ")",
"--LRB--" => "(",
);
print STDERR " LEMMATIZER: Lemmatizing...\n" unless $silent;
my %get_cat_form2lemma_cache;
my %includes_data_for_cat_formsuff_cache;
my %get_best_lemmasuffs_cache;
my %get_all_lemmasuffs_cache;
while (<>) {
chomp;
s/^\s+//;
s/\s+$//;
if (/^$/) {
print "\n";
next;
}
@result = ();
s/$/ /;
while (s/^ *((?:\[\|.*?\|\] *)?(?:\( *)?(?:{.*?} *)?)([^{ ][^ ]*?)\/([^\/ \)\|]+)((?: *[\|\)][\|\(\)]*)?) +([^ \|\)]|[\|\)][^ \|\)]|$)/$5/) {
$comment = $1;
$token = $2;
$cat = $3;
$post = $4;
$postcat = "";
if ($cat =~ s/(-UNK.*)$//) {
$postcat = $1;
}
$lemma = "";
if (get_cat_form2lemma($cat,$token) ne "") {
push @result, "$comment$token/$cat$postcat/".get_cat_form2lemma($cat,$token);
} elsif (get_cat_form2lemma($cat,lc($token)) ne "") {
push @result, "$comment$token/$cat$postcat/".get_cat_form2lemma($cat,lc($token));
} elsif (get_cat_form2lemma($cat,$equiv{$token}) ne "") {
push @result, "$comment$token/$cat$postcat/".get_cat_form2lemma($cat,$equiv{$token});
} elsif ($it_mapping && $token !~ /^[A-ZÉ]/ && $token =~ /^(.*?)(lo|la|mi|ne|gli|si|li|le)$/ && get_cat_form2lemma(VERB,lc($1)) ne "" && get_cat_form2lemma(PRON,lc($2)) ne "") {
if ($cat ne "PRON") {
push @result, "$comment$token/VERB$postcat/".get_cat_form2lemma(VERB,lc($1));
} elsif ($cat eq "PRON") {
push @result, "$comment$token/$cat$postcat/".get_cat_form2lemma($cat,lc($2));
}
} elsif ($it_mapping && $token !~ /^[A-ZÉ]/ && $token =~ /^(.*?)(lo|la|mi|ne|gli|si|li|le)$/ && get_cat_form2lemma(VERB,lc($1."e")) ne "" && get_cat_form2lemma(PRON,lc($2)) ne "") {
if ($cat ne "PRON") {
push @result, "$comment$token/VERB$postcat/".get_cat_form2lemma(VERB,lc($1."e"));
} elsif ($cat eq "PRON") {
push @result, "$comment$token/$cat$postcat/".get_cat_form2lemma($cat,lc($2));
}
} elsif ($it_mapping && $token !~ /^[A-ZÉ]/ && $token =~ /^(.*?)(.)(lo|la|mi|ne|gli|si|li|le)$/ && get_cat_form2lemma(VERB,lc($1.$2.$2."e")) ne "" && get_cat_form2lemma(PRON,lc($3)) ne "") {
if ($cat ne "PRON") {
push @result, "$comment$token/VERB$postcat/".get_cat_form2lemma(VERB,lc($1.$2.$2."e"));
} elsif ($cat eq "PRON") {
push @result, "$comment$token/$cat$postcat/".get_cat_form2lemma($cat,lc($3));
}
} elsif ($it_mapping && $token !~ /^[A-ZÉ]/ && $token =~ /^(.*)[ai]$/ && $cat =~ /^(NOUN|ADJ|PRON)$/) {
if ($lower_case_lemmas) {
push @result, "$comment$token/$cat$postcat/".lc($1)."o";
} else {
push @result, "$comment$token/$cat$postcat/$1o";
}
} else {
if ($token !~ /^[A-ZÉ]/) {
$token_suff = $token;
$token_pref = "";
while ($token_suff =~ s/^(.)(?=.)//) {
$token_pref .= $1;
if (includes_data_for_cat_formsuff($cat,$token_suff)) {
if ($multiple_lemmas) {
$lemma = get_all_lemmasuffs($cat,$token_suff,$token_pref)
} else {
$lemma = get_best_lemmasuffs($cat,$token_suff,$token_pref);
}
last;
}
}
}
if ($lemma eq "") {$lemma = $token}
if ($lower_case_lemmas) {
push @result, "$comment$token/$cat$postcat/$flag_unknowns".lc($lemma);
} else {
push @result, "$comment$token/$cat$postcat/$flag_unknowns".$lemma;
}
}
}
$what_remains = $_;
$_ = join(" ",@result);
if ($what_remains =~ /^(\[\|.*?\|\])/) {
$_ .= $1;
}
$what_remains =~ s/^\s*//;
die $what_remains if ($what_remains ne "");
print $_.$post."\n";
}
print STDERR " LEMMATIZER: Lemmatizing: done\n" unless $silent;
sub get_cat_form2lemma {
my $cat = shift;
my $form = shift;
if (defined($get_cat_form2lemma_cache{$cat}{$form})) {
return $get_cat_form2lemma_cache{$cat}{$form};
}
$sth_cfl->execute($cat,$form);
my %results = ();
while (my $value = $sth_cfl->fetchrow) {
$results{Encode::decode("utf8",$value)} = 1;
}
$sth_cfl->finish;
my $result = (join "|", sort {$a cmp $b} keys %results);
$get_cat_form2lemma_cache{$cat}{$form} = $result;
return $result;
}
sub includes_data_for_cat_formsuff {
my $cat = shift;
my $formsuff = shift;
if (defined($includes_data_for_cat_formsuff_cache{$cat}{$formsuff})) {
return $includes_data_for_cat_formsuff_cache{$cat}{$formsuff};
}
$sth_cfslsc1->execute($cat,$formsuff);
my $result = 0;
while (my $value = $sth_cfslsc1->fetchrow) {
$result = 1;
last;
}
$sth_cfslsc1->finish;
$includes_data_for_cat_formsuff_cache{$cat}{$formsuff} = $result;
return $result;
}
sub get_all_lemmasuffs {
my $cat = shift;
my $form = shift;
my $token_pref = shift;
if (defined($get_all_lemmasuffs_cache{$cat}{$form})) {
return $get_all_lemmasuffs_cache{$cat}{$form};
}
$sth_cfslsc3->execute($cat,$form);
my %results = ();
while (my $value = $sth_cfslsc3->fetchrow) {
$results{$token_pref.Encode::decode("utf8",$value)} = 1;
}
$sth_cfslsc3->finish;
my $result = (join "|", sort {$a cmp $b} keys %results);
$get_all_lemmasuffs_cache{$cat}{$form} = $result;
return $result;
}
sub get_best_lemmasuffs {
my $cat = shift;
my $form = shift;
my $token_pref = shift;
if (defined($get_best_lemmasuffs_cache{$cat}{$form})) {
return $get_best_lemmasuffs_cache{$cat}{$form};
}
$sth_cfslsc2->execute($cat,$form);
my $result;
while (my $value = $sth_cfslsc2->fetchrow) {
$result = $token_pref.Encode::decode("utf8",$value);
last;
}
$sth_cfslsc2->finish;
$get_best_lemmasuffs_cache{$cat}{$form} = $result;
return $result;
}
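As the usage notes in the heredoc above describe, this script reads Brown-format input (word1/pos1 word2/pos2 ..., one sentence per line) and appends a lemma to each token, prefixing lemmas of forms unknown to the lexicon with '*'. Purely as an illustration, here is a small Python sketch of how a caller such as the tagger patched above could pipe text through it, mirroring the _lemmatization_commands tuple from the diff (the path layout is an assumption):

    import subprocess

    def lemmatize_brown(tagged_text, path='melttagger', language='fr'):
        # Pipe Brown-format tagged text through the bundled MElt_lemmatizer.pl;
        # '-m' points at the per-language model directory, as in the diff above.
        proc = subprocess.Popen(
            ['%s/MElt_lemmatizer.pl' % path, '-m', '%s/%s' % (path, language)],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        )
        out, _ = proc.communicate(tagged_text.encode('utf8'))
        return out.decode('utf8')

    # Each token comes back as word/pos/lemma.
    print(lemmatize_brown('Le/DET chat/NC dort/V ./PONCT\n'))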
#!/usr/bin/perl
binmode STDIN, ":utf8";
binmode STDOUT, ":utf8";
binmode STDERR, ":utf8";
use utf8;
use locale;
$do_not_load_lexicon=0;
while (1) {
$_ = shift;
if (/^-d$/) {$ngrams_file_dir = shift}
elsif (/^-nc$/) {$no_correction = 1}
elsif (/^-nolex$/) {$do_not_load_lexicon = 1}
elsif (/^-c$/) {$has_sxpipe_comments = 1}
elsif (/^-l$/) {$lang = shift || die "Please provide a language code after option -l"}
elsif (/^$/) {last}
else {die "Unknown option '$_'"}
}
if ($lang eq "zzz" || $no_correction) {
while (<>) {
chomp;
print $_."\n";
}
exit 0;
}
$ngrams_file_dir .= "/" unless $ngrams_file_dir eq "" || $ngrams_file_dir =~ /\/$/;
print STDERR " NORMALIZER: Loading lexical information for language $lang...\n";
if (-d "$ngrams_file_dir") {
unless ($do_not_load_lexicon) {
if (-e "${ngrams_file_dir}lex") {
open FILE, "${ngrams_file_dir}lex";
binmode FILE, ":utf8";
while (<FILE>) {
chomp;
s/(^|[^\\])#.*//;
next if /^\s*$/;
next if /^_/;
/^(.*?)\t(.*?)\t(.*)$/ || next;
$form = $1;
$cat = $2;
$ms = $3;
$form =~ s/__.*$//;
if ($lang eq "fr") {
$adj_nom_voyelle{$form} = 1 if ($cat =~ /^(adj|nom)/ && $form =~ /^[aeiuoé]/);
$verbe_voyelle{$form} = 1 if ($cat eq "v" && $form =~ /^[aeiuoé]/);
$inf{$form} = 1 if ($cat eq "v" && $ms eq "W");
$verbe_1s{$form} = 1 if ($cat eq "v" && $ms =~ /1/);
$lex_final_e{$form} = 1 if $form =~ /e$/;
$lex_final_s{$form} = 1 if $form =~ /s$/;
$lex_final_t{$form} = 1 if $form =~ /t$/;
}
$lex{$form} = 1;
}
close FILE;
if ($lang eq "fr") {
for (sort {length($b) <=> length($a)} keys %adj_nom_voyelle) {
if (!defined($lex{"l".$_})) {
$glueddet{"l".$_} = "{l$_◀l'} l' {} $_";
}
if (!defined($lex{"d".$_})) {
$glueddet{"d".$_} = "{d$_◀d'} d' {} $_";
}
}
for (sort {length($b) <=> length($a)} keys %verbe_voyelle) {
if (!defined($lex{"l".$_})) {
$gluedclit{"s".$_} = "{s$_◀s'} s' {} $_";
}
if (!defined($lex{"d".$_})) {
$gluedclit{"n".$_} = "{n$_◀n'} n' {} $_";
}
}
for (sort {length($b) <=> length($a)} keys %inf) {
if (!defined($lex{"2".$_})) {
$glued2{"2".$_} = "{2$_◀2=de} de {} $_";
}
}
for (sort {length($b) <=> length($a)} keys %verbe_1s) {
if (!defined($lex{"j".$_})) {
$gluedj{"j".$_} = "{j$_◀j'} j' {} $_";
}
if (!defined($lex{"J".$_})) {
$gluedj{"J".$_} = "{J$_◀J'} J' {} $_";
}
}
}
} else {
print STDERR " NORMALIZER: No normalization lexical information found for language '$lang'. Skipping\n";
}
}
print STDERR " NORMALIZER: Loading lexical information for language $lang: done\n";
print STDERR " NORMALIZER: Loading replacement patterns (${ngrams_file_dir}ngrams...)\n";
if (-e "${ngrams_file_dir}ngrams") {
open NGRAMS, "<${ngrams_file_dir}ngrams" or die $!;
binmode NGRAMS, ":utf8";
while (<NGRAMS>) {
/^([^_\t][^\t]*)\t([^\t]+)(\t|$)/ || next;
$in = $1;
$out = $2;
$newout = "";
if ($out =~ /\$\d/ || $in =~ /\\/) {
$in =~ s/(\[\^[^ \]]*) /\1‗/g;
}
@in = split / /, $in;
@out = split / /, $out;
my $j = 1;
if ($#in ne $#out) {
print STDERR " NORMALIZER: Ignoring replacement /$in/$out/ found (different input and output token number)\n";
} else {
for $i (0..$#in) {
if ($out =~ /\$\d/ || $in =~ /\\/) {
while ($in[$i] =~ s/\(.*?\)/\$$j/) {$j++;}
}
$newout .= "{$in[$i]◀".($#in+1)."} $out[$i] ";
}
}
$newout =~ s/ $//;
while ($newout =~ s/(}[^{]*) /$1 {} /g){}
if ($newout =~ /\$\d/ || $in =~ /\\/) {
$ngrams{qr/$in/} = $newout;
} else {
$ngrams{quotemeta($in)} = $newout;
}
}
close NGRAMS;
} else {
print STDERR " NORMALIZER: No replacement patterns found for language '$lang'. Skipping\n";
}
print STDERR " NORMALIZER: Loading replacement patterns: done\n";
} else {
print STDERR " NORMALIZER: No replacement patterns available for language '$lang'. Skipping\n";
}
print STDERR " NORMALIZER: Normalizing...\n";
while (<>) {
chomp;
$_ = " $_ ";
s/}\s*_/} _/g;
$is_maj_only = 0;
$tmp = $_;
$tmp =~ s/◀.*?}/}/g;
$tmp =~ s/{([^{}]+)} _[^ ]+/$1/g;
if ($tmp=~/^[^a-zâäàéèêëïîöôüûùÿ]+$/ && $tmp=~/[A-Z]{5,}/ && length($tmp) > 10) {
$is_maj_only = 1;
$_ = lc($_);
s/}\s*_(url|smiley|email|date[^ ]*|time|heure|adresse|underscore|acc_[of])/"} _".uc($1)/ge;
s/(-[lr][rcs]b-)/uc($1)/ge;
}
if ($has_sxpipe_comments) {
s/{([^{}]+)} *\1( |$)/\1\2/g;
}
for $ngram (sort {(($b=~s/([  ])/\1/g) <=> ($a=~s/([  ])/\1/g)) || (length($b) <=> length($a))} keys %ngrams) {
$t = $ngrams{$ngram};
$t =~ s/ / /g;
$ngram =~ s/ / /g;
$ngram =~ s/‗/ /g;
if ($t =~ /\$/) {
while (/(?<=[^}]) $ngram /) {
@v = ();
$v[1] = $1;
$v[2] = $2;
$v[3] = $3;
$v[4] = $4;
$v[5] = $5;
$v[6] = $6;
$v[7] = $7;
$v[8] = $8;
$v[9] = $9;
$tmp = $t;
for $i (1..9) {
$tmp =~ s/\$$i/$v[$i]/g;
}
s/(?<=[^}]) $ngram / $tmp /;
}
} else {
s/(?<=[^}]) $ngram / $t /g;
}
}
$tmp = $_;
$_ = "";
while ($tmp =~ s/^ *((?:{.*?} )?)(.*?) //) {
$orig = $1;
$target = $2;
$tmptarget = $target;
if ($lang eq "fr") {
if ($orig eq "" && length($target) >= 3 && $target !~ /[{}]/ && !defined($lex{$target}) && defined($glueddet{$target})) {
$_ .= $glueddet{$target}." ";
} elsif ($orig eq "" && length($target) >= 3 && $target !~ /[{}]/ &&!defined($lex{$target}) && defined($gluedclit{$target})) {
$_ .= $gluedclit{$target}." ";
} elsif ($orig eq "" && length($target) >= 3 && $target !~ /[{}]/ &&!defined($lex{$target}) && defined($glued2{$target})) {
$_ .= $glued2{$target}." ";
} elsif ($orig eq "" && length($target) >= 3 && $target !~ /[{}]/ &&!defined($lex{$target}) && defined($gluedj{$target})) {
$_ .= $gluedj{$target}." ";
} elsif ($orig eq "" && length($target) >= 2 && $target =~ /^[a-zâäàéèêëïîöôüûùÿ]+$/ && !defined($lex{$target}) && defined($lex_final_s{$target."s"})) {
$_ .= "{$target◀s} ${target}s ";
} elsif ($orig eq "" && length($target) >= 2 && $target =~ /^[a-zâäàéèêëïîöôüûùÿ]+$/ &&!defined($lex{$target}) && defined($lex_final_t{$target."t"})) {
$_ .= "{$target◀t} ${target}t ";
} elsif ($orig eq "" && length($target) >= 2 && $target =~ /^[a-zâäàéèêëïîöôüûùÿ]+$/ &&!defined($lex{$target}) && defined($lex_final_e{$target."e"})) {
$_ .= "{$target◀e} ${target}e ";
} elsif ($orig eq "" && length($target) >= 2 && $target =~ /^[a-zâäàéèêëïîöôüûùÿ]+$/ &&!defined($lex{$target}) && $tmptarget =~ s/è/é/g && defined($lex{$tmptarget})) {
$_ .= "{$target◀èé} $tmptarget ";
} elsif ($orig eq "" && length($target) >= 2 && $target =~ /^[a-zâäàéèêëïîöôüûùÿ]+$/ &&!defined($lex{$target}) && $tmptarget =~ s/é$/ait/g && defined($lex{$tmptarget})) {
$_ .= "{$target◀éait} $tmptarget ";
} elsif ($orig eq "" && length($target) >= 2 && $target !~ /[{}]/ &&!defined($lex{$target}) && ($tmptarget =~ s/(^|[^w])([w\.])\2\2([^w]|$)/\1 \2 \2 \2 \3/g || 1)
&& $tmptarget =~ s/([^0-9\.])(?:\1){2,}/\1/g) {
$tmptarget =~ s/ ([.]) \1 \1 /\1\1\1/g;
if ($tmptarget =~ /^(.)(.)/ && $1 eq uc($2)) {
$tmptarget =~ s/^(.)./\1/;
}
$_ .= "{$target◀etir} $tmptarget ";
} elsif ($orig eq "" && length($target) >= 2 && $target =~ /^[a-zâäàéèêëïîöôüûùÿ]+$/ &&!defined($lex{$target}) && $tmptarget =~ /^(.*)k$/ && defined($lex{$1.'que'})) {
$tmptarget =~ s/k$/que/;
$_ .= "{$target◀kque} $tmptarget "; # on ne vérifie même pas que ce soit dans le lex
} elsif ($orig eq "" && length($target) >= 2 && $target =~ /^[a-zâäàéèêëïîöôüûùÿ]+$/ &&!defined($lex{$target}) && $target =~ /[aeé]men$/) {
$_ .= "{$target◀ment} ${target}t "; # on ne vérifie même pas que ce soit dans le lex
} else {
$_ .= $orig.$target." ";
}
} else {
$_ .= $orig.$target." ";
}
}
if ($is_maj_only) {
s/{([^}◀]+)/"{".uc($1)/ge;
s/^ *([^{} ]+)/" {".uc($1)."◀lc} ".$1/ge;
s/(?<=[^}]) ([^{} ]+)(?= )/" {".uc($1)."◀lc} ".$1/ge;
}
s/{([^}◀]+)(?:◀[^}]*)} \1 /\1 /g;
s/{([LDJSldsj])◀1} [LDJldsj]' +$/\1/;
s/ +$//;
s/^ +//;
s/◀[^}]*}/}/g; # skip this line to keep the correction-type markers
print "$_\n";
}
print STDERR " NORMALIZER: Normalizing: done\n";
#!/usr/bin/perl
binmode STDIN, ":utf8";
binmode STDOUT, ":utf8";
binmode STDERR, ":utf8";
use utf8;
$| = 1;
$remove_non_standard_amalgams = 0;
$tag_amalgam_with_its_last_component_tag = 0;
$keep_token_form_distinction = 0;
$lang = "fr";
while (1) {
$_=shift;
if (/^$/) {last;}
elsif (/^-l(?:ang(?:age)?)?$/) {$lang=shift || die "Please provide a language code after -l option (en, fr)";}
elsif (/^-npp$/) {$no_post_process = 1}
elsif (/^-ktfd$/) {$keep_token_form_distinction = 1}
elsif (/^-rnsa$/) {$remove_non_standard_amalgams = 1}
elsif (/^-alct$/) {$tag_amalgam_with_its_last_component_tag = 1}
}
if ($lang eq "zzz" || $no_post_process) {
while (<>) {
s/^{([^}]+)} _XML\/[^ \n]+$/\1/;
if (/{/ && $keep_token_form_distinction) {
s/◁/\\{/g;
s/▷/\\}/g;
s/_ACC_O/\\{/g;
s/_ACC_F/\\}/g;
} else {
s/{([^}]*)} *[^ ]+(\/[^ \/]+)/replace_whitespaces_with_underscores($1).$2/ge;
s/◁/{/g;
s/▷/}/g;
s/_ACC_O/{/g;
s/_ACC_F/}/g;
}
s/_UNDERSCORE/_/g;
print $_;
}
exit 0;
}
while (<>) {
chomp;
s/^ +//;
s/ +$//;
$out = "";
s/ +/ /g;
# realignment with the original tokens (first pass)
s/^\s*{(.*?)} *_XML\/[^ ]+\s*$/${1}/;
if ($lang eq "en") {
s/(^| )vs\.\/[^ ]+/$1vs\.\/IN/g;
s/(^| )Vince\/[^ ]+/$1Vince\/NNP/g;
s/(^| )Thanks\/[^ ]+/$1Thanks\/NNS/g;
s/(^| )please\/[^ ]+/$1please\/UH/g;
s/(^| )Please\/[^ ]+/$1Please\/UH/g;
s/(^| )([AP]M)\/[^ ]+/$1$2\/NN/g;
while (s/{([^{}]+) ([^{} ]+)} ([^ \/{}]+)\/([^ \/]+)/{$1} ${3}\/GW {$2} ${3}\/$4/g) {}
s/(^| )>\/GW/\1>\/-RRB-/g;
s/(^| )<\/GW/\1<\/-LRB-/g;
s/({ *[^{} ]+ *})\s*_SMILEY\/[^ ]+/$1 _SMILEY\/NFP/g;
s/({ *[^{} ]+ [^{}]+}\s*)_SMILEY\/[^ ]+/$1 _SMILEY\/NFP/g;
s/_URL\/[^ ]+/_URL\/ADD/g;
s/_EMAIL\/[^ ]+/_EMAIL\/ADD/g;
s/_DATE[^ ]*\/[^ ]+/_EMAIL\/CD/g;
s/_(?:TIME|HEURE)\/[^ ]+/_EMAIL\/CD/g;
s/(^| )(l+o+l+|a+r+g+h+|a+h+a+|m+d+r+|p+t+d+r+)\/[^ ]+/$1$2\/NFP/gi; #|♥
s/(^| )([•·\*o])\/[^ ]+/$1$2\/:/g; #?
s/(^| )([^ {}]+\@[^ {}]{2,})\/[^ \/{}]+/\1\2\/ADD/g; # emails
s/(^| )([^ {}]+\.(?:com|org|net|pdf|docx?))\/[^ \/{}]+/\1\2\/ADD/g; # files
s/(^| )(http[^ {}]+\/[^ {}]+)\/[^ \/{}]+/\1\2\/ADD/g; # URLs
s/(^| )(www\.[^ {}]+)\/[^ \/{}]+/\1\2\/ADD/g; # URLs
s/(^| )([^ {}]+([=_\*-\~]{1,2})\3\3\3[^ {}]+)\/[^ \/{}]+/\1\2\/NFP/g;
s/(^| )(\|)\/[^ \/{}]+/\1\2\/NFP/g;
s/(^| )(s)\/[^ \/{}]+/\1\2\/AFX/g;
s/^([A-Z][^ {}]+)\/[^ \/{}]+ ([^ {}]+\/ADD)/\1\/GW \2/g; # !!!
s/^([A-Z][^ {}]+)\/[^ \/{}]+ ([A-Z])\/[^ \/{}]+ ([^ {}]+\/ADD)/\1\/GW \2\/GW \3/g; # !!!
s/^-\/[^ {}]+ ([A-Z][^ {}]+)\/[^ \/{}]+ ([^ {}]+\/ADD)/-\/NFP \1\/GW \2/g; # !!!
s/^-\/[^ {}]+ ([A-Z][^ {}]+)\/[^ \/{}]+ ([A-Z])\/[^ \/{}]+ ([^ {}]+\/ADD)/-\/NFP \1\/GW \2\/GW \3/g; # !!!
} elsif ($lang eq "fr") {
s/( je\/)[^ ]+/\1CLS/g;
s/^((?:{[^{} ]+} )?)tu\/[^ ]+/\1tu\/CLS/g;
s/( tu\/)[^ ]+ ((?:{[^{} ]+} )?[^ ]+\/VS?)/\1CLS \2/g;
s/({ *[^{} ]+ *})\s*_SMILEY\/[^ ]+/$1 _SMILEY\/I/g;
s/({ *[^{} ]+ [^{}]+})\s*_SMILEY\/[^ ]+/$1 _SMILEY\/X/g;
s/^([0-9\.]+)\/[^ ]+$/\1\/META/;
s/^([0-9\.]+)\/[^ ]+ \.\/[^ ]+$/\1\/META \.\/META/;
s/({\#[^{} ]+}) _URL\/[^ ]+/\1 _URL\/KK/g;
s/({[^\#][^{} ]*}) _URL\/[^ ]+/\1 _URL\/NPP/g;
# s/_URL\/[^ ]+/_URL\/NPP/g;
s/_EMAIL\/[^ ]+/_EMAIL\/NPP/g;
s/(^| )(l+o+l+|a+r+g+h+|a+h+a+|♥)\/[^ ]+/$1$2\/I/gi;
s/(^| )([•·\*o]|\.+)\/[^ ]+/$1$2\/PONCT/g;
s/(^| )(Like|Share)\/[^ ]+/$1$2\/ET/g;
s/(^|$)([^ ]+)\/[^ ]+ (at)\/[^ ]+ (\d+)\/[^ ]+ (:)\/[^ ]+ (\d+(?:[ap]m)?)\/[^ ]+/$1$2\/ADV $3\/P $4\/DET $5\/PONCT $6\/DET/g;
s/(^|$)(\d+)\/[^ ]+ (people)\/[^ ]+ (like)\/[^ ]+ (this)\/[^ ]+/$1$2\/DET $3\/NC $4\/V $5\/PRO/g;
s/(^|$)(\d+)\/[^ ]+ (hours|minutes|seconds)\/[^ ]+ (ago)\/[^ ]+/$1$2\/DET $3\/NC $4\/ADV/g;
s/(^|$)(love)\/[^ ]+ (u|you)\/[^ ]+/$1$2\/V $3\/PRO/g;
# for smsalpes
s/(^| )\*\/[^ ]+ \*\/[^ ]+ \*\/[^ ]+ ([A-Z]+)\/[^ ]+ (?:{_} _UNDERSCORE|_)\/[^ ]+ ([0-9]+)\/[^ ]+ \*\/[^ ]+ \*\/[^ ]+ \*\/[^ ]+( |$)/$1***$2_$3***\/NPP$4/g;
s/(^| )\*\/[^ ]+ \*\/[^ ]+ \*\/[^ ]+ ([A-Z]+)\/[^ ]+ (?:{_} _UNDERSCORE|_)\/[^ ]+ ([0-9]+)\/[^ ]+ (?:{_} _UNDERSCORE|_)\/[^ ]+ ([0-9]+)\/[^ ]+ \*\/[^ ]+ \*\/[^ ]+ \*\/[^ ]+( |$)/$1***$2_$3_$4***\/NPP$5/g;
s/(^| )\*\/[^ ]+ \*\/[^ ]+ \*\/[^ ]+ {([A-Z]+)} [^ ]+\/[^ ]+ (?:{_} _UNDERSCORE|_)\/[^ ]+ ([0-9]+)\/[^ ]+ \*\/[^ ]+ \*\/[^ ]+ \*\/[^ ]+( |$)/$1***$2_$3***\/NPP$4/g;
s/(^| )\*\/[^ ]+ \*\/[^ ]+ \*\/[^ ]+ {([A-Z]+)} [^ ]+\/[^ ]+ (?:{_} _UNDERSCORE|_)\/[^ ]+ ([0-9]+)\/[^ ]+ (?:{_} _UNDERSCORE|_)\/[^ ]+ ([0-9]+)\/[^ ]+ \*\/[^ ]+ \*\/[^ ]+ \*\/[^ ]+( |$)/$1***$2_$3_$4***\/NPP$5/g;
}
s/}_/} _/g;
$out = "";
# realignment with the original tokens
while ($_ ne "") {
if (s/^{([^ {}]+)} ([^ {}]+(?: \{\} *[^ {}]+)+)( |$)//) {
$t = $1;
$f = $2;
$f =~ s/^[^ ]*\///;
$f =~ s/ {} [^ ]*\//+/g;
$t =~ s/^(.*)◀.*/\1/;
if ($f =~ /\+/) {
if ($remove_non_standard_amalgams && $f ne "P+D" && $f ne "P+PRO") {
$f = "X";
} elsif ($tag_amalgam_with_its_last_component_tag) {
$f =~ s/^.*\+//;
}
}
$out .= " $t/$f";
} elsif (s/^{([^ {}]+(?: [^{}]+)+)} ([^ {}]+)\/([^ {}\/]+)( |$)//) {
$t = $1;
$f = $2;
$tag = $3;
$t =~ s/^(.*)◀.*/\1/;
if ($remove_non_standard_amalgams) {
$t =~ s/ /\/Y /g;
$out .= " $t/Y";
} else {
if ($lang eq "fr") {
$t =~ s/ /\/Y /g;
} else {
$t =~ s/ /\/GW /g;
}
$out .= " $t/$tag";
}
} elsif (s/^{([^ {}]+)} ([^ {}]+)( |$)//) {
$t = $1;
$f = $2;
$t =~ s/^(.*)◀.*/\1/;
$f =~ s/^.*\///;
$out .= " $t/$f";
} elsif (s/^([^{} ]+)( |$)//) {
$out .= " $1";
} else {
die $_;
}
s/^ *//;
}
$out =~ s/◁/{/g;
$out =~ s/▷/}/g;
$out =~ s/^ +//;
$out =~ s/ +$//;
print $out."\n";
}
sub replace_whitespaces_with_underscores {
my $s = shift;
$s =~ s/ /_/g;
return $s;
}
xxxxxxxxxxxx.
inf..
prc..
sq..
sqq..
suiv..
sup..
N.B..
d'abord (...ensuite)
d'un côté (...d'un autre côté)
d'une part (...d'autre part)
e.a..
e.g.
et al..
i.e.
ibid..
id..
loc. cit.
op. cit..
q.e.d..
une médaille d'argent aux J.O.
une médaille d'or aux J.O.
une médaille de bronze aux J.O.
c.-à-d.
i.e.
O.K..
B.D
B.D..
C.V..
M.
MM.
Mr.
O.N.U.
P.S..
Q.G..
R.P.
S.A.
S.A..
S.A.R.L.
S.F..
S.O.S..
Tel..
Tél..
adj..
adv..
art.
bibliogr..
boul.
bull.
cap.
ch.
chap.
coll.
collec.
dept.
dir.
dép.
ex..
fasc.
fig.
hab..
ill.
intr..
introd..
ital..
math..
ms.
obs..
p.
p.-s..
paragr.
pl..
pp.
réf..
réd.
s.f
s.f..
sp..
spp..
t.
tel..
trad..
tél..
v.
v.
var.
vol.
zool..
éd.
édit..
étym..
Ch.
George W. Bush
J.-C
J.-C.
J.O.
N.-D.
O.N.U.
St. George's
St. John's
Th.
Th.
U.E.
U.R.S.S.
U.S.A.
B.A.ba
O.P.A.
O.P.A.
Q.I.
k.o.
marques de N(sent.)
rosette d'off. de la L.d'h.
(...)
.
...
...
....
[...]
etc.
apr.
av.
cf.
conf.
vs.
apr.
av.
cf.
conf.
vs.
#_error as a verb it is in v.ilex
# PLY package
# Author: David Beazley (dave@dabeaz.com)
__all__ = ['lex','yacc']
# -----------------------------------------------------------------------------
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
# Copyright (C) 2007
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
# -----------------------------------------------------------------------------
tokens = (
'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT', 'CPP_POUND','CPP_DPOUND'
)
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
# Whitespace
def t_CPP_WS(t):
r'\s+'
t.lexer.lineno += t.value.count("\n")
return t
t_CPP_POUND = r'\#'
t_CPP_DPOUND = r'\#\#'
# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'
# Integer literal
def CPP_INTEGER(t):
r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU]|[lL]|[uU][lL]|[lL][uU])?)'
return t
t_CPP_INTEGER = CPP_INTEGER
# Floating literal
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
def t_CPP_STRING(t):
r'\"([^\\\n]|(\\(.|\n)))*?\"'
t.lexer.lineno += t.value.count("\n")
return t
# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
t.lexer.lineno += t.value.count("\n")
return t
# Comment
def t_CPP_COMMENT(t):
r'(/\*(.|\n)*?\*/)|(//.*?\n)'
t.lexer.lineno += t.value.count("\n")
return t
def t_error(t):
t.type = t.value[0]
t.value = t.value[0]
t.lexer.skip(1)
return t
import re
import copy
import time
import os.path
# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
# ??/ \
# ??' ^
# ??( [
# ??) ]
# ??! |
# ??< {
# ??> }
# ??- ~
# -----------------------------------------------------------------------------
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
_trigraph_rep = {
'=':'#',
'/':'\\',
"'":'^',
'(':'[',
')':']',
'!':'|',
'<':'{',
'>':'}',
'-':'~'
}
def trigraph(input):
return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
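# Example (illustrative): trigraph('??=define X ??< 1 ??>') returns '#define X { 1 }'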
# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
# .name - Macro name (string)
# .value - Macro value (a list of tokens)
# .arglist - List of argument names
# .variadic - Boolean indicating whether or not variadic macro
# .vararg - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
class Macro(object):
def __init__(self,name,value,arglist=None,variadic=False):
self.name = name
self.value = value
self.arglist = arglist
self.variadic = variadic
if variadic:
self.vararg = arglist[-1]
self.source = None
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
class Preprocessor(object):
def __init__(self,lexer=None):
if lexer is None:
lexer = lex.lexer
self.lexer = lexer
self.macros = { }
self.path = []
self.temp_path = []
# Probe the lexer for selected tokens
self.lexprobe()
tm = time.localtime()
self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
self.parser = None
# -----------------------------------------------------------------------------
# tokenize()
#
# Utility function. Given a string of text, tokenize into a list of tokens
# -----------------------------------------------------------------------------
def tokenize(self,text):
tokens = []
self.lexer.input(text)
while True:
tok = self.lexer.token()
if not tok: break
tokens.append(tok)
return tokens
# ---------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self,file,line,msg):
print(("%s:%d %s" % (file,line,msg)))
# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
def lexprobe(self):
# Determine the token type for identifiers
self.lexer.input("identifier")
tok = self.lexer.token()
if not tok or tok.value != "identifier":
print("Couldn't determine identifier type")
else:
self.t_ID = tok.type
# Determine the token type for integers
self.lexer.input("12345")
tok = self.lexer.token()
if not tok or int(tok.value) != 12345:
print("Couldn't determine integer type")
else:
self.t_INTEGER = tok.type
self.t_INTEGER_TYPE = type(tok.value)
# Determine the token type for strings enclosed in double quotes
self.lexer.input("\"filename\"")
tok = self.lexer.token()
if not tok or tok.value != "\"filename\"":
print("Couldn't determine string type")
else:
self.t_STRING = tok.type
# Determine the token type for whitespace--if any
self.lexer.input(" ")
tok = self.lexer.token()
if not tok or tok.value != " ":
self.t_SPACE = None
else:
self.t_SPACE = tok.type
# Determine the token type for newlines
self.lexer.input("\n")
tok = self.lexer.token()
if not tok or tok.value != "\n":
self.t_NEWLINE = None
print("Couldn't determine token for newlines")
else:
self.t_NEWLINE = tok.type
self.t_WS = (self.t_SPACE, self.t_NEWLINE)
# Check for other characters used by the preprocessor
chars = [ '<','>','#','##','\\','(',')',',','.']
for c in chars:
self.lexer.input(c)
tok = self.lexer.token()
if not tok or tok.value != c:
print(("Unable to lex '%s' required for preprocessor" % c))
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
self.path.append(path)
# ----------------------------------------------------------------------
# group_lines()
#
# Given an input string, this function splits it into lines. Trailing whitespace
# is removed. Any line ending with \ is grouped with the next line. This
# function forms the lowest level of the preprocessor---grouping input text into
# a line-by-line format.
# ----------------------------------------------------------------------
def group_lines(self,input):
lex = self.lexer.clone()
lines = [x.rstrip() for x in input.splitlines()]
for i in range(len(lines)):
j = i+1
while lines[i].endswith('\\') and (j < len(lines)):
lines[i] = lines[i][:-1]+lines[j]
lines[j] = ""
j += 1
input = "\n".join(lines)
lex.input(input)
lex.lineno = 1
current_line = []
while True:
tok = lex.token()
if not tok:
break
current_line.append(tok)
if tok.type in self.t_WS and '\n' in tok.value:
yield current_line
current_line = []
if current_line:
yield current_line
# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
def tokenstrip(self,tokens):
i = 0
while i < len(tokens) and tokens[i].type in self.t_WS:
i += 1
del tokens[:i]
i = len(tokens)-1
while i >= 0 and tokens[i].type in self.t_WS:
i -= 1
del tokens[i+1:]
return tokens
# ----------------------------------------------------------------------
# collect_args()
#
# Collects comma separated arguments from a list of tokens. The arguments
# must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions)
# where tokencount is the number of tokens consumed, args is a list of arguments,
# and positions is a list of integers containing the starting index of each
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
# This function properly handles nested parenthesis and commas---these do not
# define new arguments.
# ----------------------------------------------------------------------
def collect_args(self,tokenlist):
args = []
positions = []
current_arg = []
nesting = 1
tokenlen = len(tokenlist)
# Search for the opening '('.
i = 0
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
i += 1
if (i < tokenlen) and (tokenlist[i].value == '('):
positions.append(i+1)
else:
self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
return 0, [], []
i += 1
while i < tokenlen:
t = tokenlist[i]
if t.value == '(':
current_arg.append(t)
nesting += 1
elif t.value == ')':
nesting -= 1
if nesting == 0:
if current_arg:
args.append(self.tokenstrip(current_arg))
positions.append(i)
return i+1,args,positions
current_arg.append(t)
elif t.value == ',' and nesting == 1:
args.append(self.tokenstrip(current_arg))
positions.append(i+1)
current_arg = []
else:
current_arg.append(t)
i += 1
# Missing end argument
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
return 0, [],[]
# ----------------------------------------------------------------------
# macro_prescan()
#
# Examine the macro value (token sequence) and identify patch points
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
def macro_prescan(self,macro):
macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
while i < len(macro.value):
if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
argnum = macro.arglist.index(macro.value[i].value)
# Conversion of argument to a string
if i > 0 and macro.value[i-1].value == '#':
macro.value[i] = copy.copy(macro.value[i])
macro.value[i].type = self.t_STRING
del macro.value[i-1]
macro.str_patch.append((argnum,i-1))
continue
# Concatenation
elif (i > 0 and macro.value[i-1].value == '##'):
macro.patch.append(('c',argnum,i-1))
del macro.value[i-1]
continue
elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
macro.patch.append(('c',argnum,i))
i += 1
continue
# Standard expansion
else:
macro.patch.append(('e',argnum,i))
elif macro.value[i].value == '##':
if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
(macro.value[i+1].value == macro.vararg):
macro.var_comma_patch.append(i-1)
i += 1
macro.patch.sort(key=lambda x: x[2],reverse=True)
# ----------------------------------------------------------------------
# macro_expand_args()
#
# Given a Macro and list of arguments (each a token list), this method
# returns an expanded version of a macro. The return value is a token sequence
# representing the replacement macro tokens
# ----------------------------------------------------------------------
def macro_expand_args(self,macro,args):
# Make a copy of the macro token sequence
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
rep[i] = copy.copy(rep[i])
rep[i].value = str_expansion[argnum]
# Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid of the comma before it in the expansion.
comma_patch = False
if macro.variadic and not args[-1]:
for i in macro.var_comma_patch:
rep[i] = None
comma_patch = True
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
if ptype == 'c':
rep[i:i+1] = args[argnum]
# Normal expansion. Argument is macro expanded first
elif ptype == 'e':
if argnum not in expanded:
expanded[argnum] = self.expand_macros(args[argnum])
rep[i:i+1] = expanded[argnum]
# Get rid of removed comma if necessary
if comma_patch:
rep = [_i for _i in rep if _i]
return rep
# ----------------------------------------------------------------------
# expand_macros()
#
# Given a list of tokens, this function performs macro expansion.
# The expanded argument is a dictionary that contains macros already
# expanded. This is used to prevent infinite recursion.
# ----------------------------------------------------------------------
def expand_macros(self,tokens,expanded=None):
if expanded is None:
expanded = {}
i = 0
while i < len(tokens):
t = tokens[i]
if t.type == self.t_ID:
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
m = self.macros[t.value]
if not m.arglist:
# A simple macro
ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
for e in ex:
e.lineno = t.lineno
tokens[i:i+1] = ex
i += len(ex)
else:
# A macro with arguments
j = i + 1
while j < len(tokens) and tokens[j].type in self.t_WS:
j += 1
if tokens[j].value == '(':
tokcount,args,positions = self.collect_args(tokens[j:])
if not m.variadic and len(args) != len(m.arglist):
self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
i = j + tokcount
elif m.variadic and len(args) < len(m.arglist)-1:
if len(m.arglist) > 2:
self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
else:
self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
i = j + tokcount
else:
if m.variadic:
if len(args) == len(m.arglist)-1:
args.append([])
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
for r in rep:
r.lineno = t.lineno
tokens[i:j+tokcount] = rep
i += len(rep)
del expanded[t.value]
continue
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
i += 1
return tokens
# ----------------------------------------------------------------------
# evalexpr()
#
# Evaluate an expression token sequence for the purposes of evaluating
# integral expressions.
# ----------------------------------------------------------------------
def evalexpr(self,tokens):
# tokens = tokenize(line)
# Search for defined macros
i = 0
while i < len(tokens):
if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
j = i + 1
needparen = False
result = "0L"
while j < len(tokens):
if tokens[j].type in self.t_WS:
j += 1
continue
elif tokens[j].type == self.t_ID:
if tokens[j].value in self.macros:
result = "1L"
else:
result = "0L"
if not needparen: break
elif tokens[j].value == '(':
needparen = True
elif tokens[j].value == ')':
break
else:
self.error(self.source,tokens[i].lineno,"Malformed defined()")
j += 1
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE(result)
del tokens[i+1:j+1]
i += 1
tokens = self.expand_macros(tokens)
for i,t in enumerate(tokens):
if t.type == self.t_ID:
tokens[i] = copy.copy(t)
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE("0L")
elif t.type == self.t_INTEGER:
tokens[i] = copy.copy(t)
# Strip off any trailing suffixes
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]
expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
expr = expr.replace("!"," not ")
try:
result = eval(expr)
except Exception:
self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
result = 0
return result
# ----------------------------------------------------------------------
# parsegen()
#
# Parse an input string.
# ----------------------------------------------------------------------
def parsegen(self,input,source=None):
# Replace trigraph sequences
t = trigraph(input)
lines = self.group_lines(t)
if not source:
source = ""
self.define("__FILE__ \"%s\"" % source)
self.source = source
chunk = []
enable = True
iftrigger = False
ifstack = []
for x in lines:
for i,tok in enumerate(x):
if tok.type not in self.t_WS: break
if tok.value == '#':
# Preprocessor directive
for tok in x:
if tok in self.t_WS and '\n' in tok.value:
chunk.append(tok)
dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
args = self.tokenstrip(dirtokens[1:])
else:
name = ""
args = []
if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.define(args)
elif name == 'include':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
oldfile = self.macros['__FILE__']
for tok in self.include(args):
yield tok
self.macros['__FILE__'] = oldfile
self.source = source
elif name == 'undef':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.undef(args)
elif name == 'ifdef':
ifstack.append((enable,iftrigger))
if enable:
if not args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'ifndef':
ifstack.append((enable,iftrigger))
if enable:
if args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'if':
ifstack.append((enable,iftrigger))
if enable:
result = self.evalexpr(args)
if not result:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'elif':
if ifstack:
if ifstack[-1][0]: # We only pay attention if outer "if" allows this
if enable: # If already true, we flip enable False
enable = False
elif not iftrigger: # If False, but not triggered yet, we'll check expression
result = self.evalexpr(args)
if result:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
elif name == 'else':
if ifstack:
if ifstack[-1][0]:
if enable:
enable = False
elif not iftrigger:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
elif name == 'endif':
if ifstack:
enable,iftrigger = ifstack.pop()
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
else:
# Unknown preprocessor directive
pass
else:
# Normal text
if enable:
chunk.extend(x)
for tok in self.expand_macros(chunk):
yield tok
chunk = []
# ----------------------------------------------------------------------
# include()
#
# Implementation of file-inclusion
# ----------------------------------------------------------------------
def include(self,tokens):
# Try to extract the filename and then process an include file
if not tokens:
return
if tokens:
if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
tokens = self.expand_macros(tokens)
if tokens[0].value == '<':
# Include <...>
i = 1
while i < len(tokens):
if tokens[i].value == '>':
break
i += 1
else:
print("Malformed #include <...>")
return
filename = "".join([x.value for x in tokens[1:i]])
path = self.path + [""] + self.temp_path
elif tokens[0].type == self.t_STRING:
filename = tokens[0].value[1:-1]
path = self.temp_path + [""] + self.path
else:
print("Malformed #include statement")
return
for p in path:
iname = os.path.join(p,filename)
try:
data = open(iname,"r").read()
dname = os.path.dirname(iname)
if dname:
self.temp_path.insert(0,dname)
for tok in self.parsegen(data,filename):
yield tok
if dname:
del self.temp_path[0]
break
except IOError:
pass
else:
print(("Couldn't find '%s'" % filename))
# ----------------------------------------------------------------------
# define()
#
# Define a new macro
# ----------------------------------------------------------------------
def define(self,tokens):
if isinstance(tokens,str):
tokens = self.tokenize(tokens)
linetok = tokens
try:
name = linetok[0]
if len(linetok) > 1:
mtype = linetok[1]
else:
mtype = None
if not mtype:
m = Macro(name.value,[])
self.macros[name.value] = m
elif mtype.type in self.t_WS:
# A normal macro
m = Macro(name.value,self.tokenstrip(linetok[2:]))
self.macros[name.value] = m
elif mtype.value == '(':
# A macro with arguments
tokcount, args, positions = self.collect_args(linetok[1:])
variadic = False
for a in args:
if variadic:
print("No more arguments may follow a variadic argument")
break
astr = "".join([str(_i.value) for _i in a])
if astr == "...":
variadic = True
a[0].type = self.t_ID
a[0].value = '__VA_ARGS__'
variadic = True
del a[1:]
continue
elif astr[-3:] == "..." and a[0].type == self.t_ID:
variadic = True
del a[1:]
# If, for some reason, "." is part of the identifier, strip off the name for the purposes
# of macro expansion
if a[0].value[-3:] == '...':
a[0].value = a[0].value[:-3]
continue
if len(a) > 1 or a[0].type != self.t_ID:
print("Invalid macro argument")
break
else:
mvalue = self.tokenstrip(linetok[1+tokcount:])
i = 0
while i < len(mvalue):
if i+1 < len(mvalue):
if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
del mvalue[i]
continue
elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
del mvalue[i+1]
i += 1
m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
self.macro_prescan(m)
self.macros[name.value] = m
else:
print("Bad macro definition")
except LookupError:
print("Bad macro definition")
# ----------------------------------------------------------------------
# undef()
#
# Undefine a macro
# ----------------------------------------------------------------------
def undef(self,tokens):
id = tokens[0].value
try:
del self.macros[id]
except LookupError:
pass
# ----------------------------------------------------------------------
# parse()
#
# Parse input text.
# ----------------------------------------------------------------------
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)
# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
def token(self):
try:
while True:
tok = next(self.parser)
if tok.type not in self.ignore: return tok
except StopIteration:
self.parser = None
return None
if __name__ == '__main__':
import ply.lex as lex
lexer = lex.lex()
# Run a preprocessor
import sys
f = open(sys.argv[1])
input = f.read()
p = Preprocessor(lexer)
p.parse(input,sys.argv[1])
while True:
tok = p.token()
if not tok: break
print((p.source, tok))
# -----------------------------------------------------------------------------
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
# Copyright (C) 2007
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
from __future__ import generators
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
# -----------------------------------------------------------------------------
tokens = (
'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT', 'CPP_POUND','CPP_DPOUND'
)
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
# Whitespace
def t_CPP_WS(t):
r'\s+'
t.lexer.lineno += t.value.count("\n")
return t
t_CPP_POUND = r'\#'
t_CPP_DPOUND = r'\#\#'
# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'
# Integer literal
def CPP_INTEGER(t):
r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU]|[lL]|[uU][lL]|[lL][uU])?)'
return t
t_CPP_INTEGER = CPP_INTEGER
# Floating literal
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
def t_CPP_STRING(t):
r'\"([^\\\n]|(\\(.|\n)))*?\"'
t.lexer.lineno += t.value.count("\n")
return t
# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
t.lexer.lineno += t.value.count("\n")
return t
# Comment
def t_CPP_COMMENT(t):
r'(/\*(.|\n)*?\*/)|(//.*?\n)'
t.lexer.lineno += t.value.count("\n")
return t
def t_error(t):
t.type = t.value[0]
t.value = t.value[0]
t.lexer.skip(1)
return t
import re
import copy
import time
import os.path
# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
# ??/ \
# ??' ^
# ??( [
# ??) ]
# ??! |
# ??< {
# ??> }
# ??- ~
# -----------------------------------------------------------------------------
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
_trigraph_rep = {
'=':'#',
'/':'\\',
"'":'^',
'(':'[',
')':']',
'!':'|',
'<':'{',
'>':'}',
'-':'~'
}
def trigraph(input):
return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
# .name - Macro name (string)
# .value - Macro value (a list of tokens)
# .arglist - List of argument names
# .variadic - Boolean indicating whether or not variadic macro
# .vararg - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
class Macro(object):
def __init__(self,name,value,arglist=None,variadic=False):
self.name = name
self.value = value
self.arglist = arglist
self.variadic = variadic
if variadic:
self.vararg = arglist[-1]
self.source = None
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
class Preprocessor(object):
def __init__(self,lexer=None):
if lexer is None:
lexer = lex.lexer
self.lexer = lexer
self.macros = { }
self.path = []
self.temp_path = []
# Probe the lexer for selected tokens
self.lexprobe()
tm = time.localtime()
self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
self.parser = None
# -----------------------------------------------------------------------------
# tokenize()
#
# Utility function. Given a string of text, tokenize into a list of tokens
# -----------------------------------------------------------------------------
def tokenize(self,text):
tokens = []
self.lexer.input(text)
while True:
tok = self.lexer.token()
if not tok: break
tokens.append(tok)
return tokens
# ---------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self,file,line,msg):
print("%s:%d %s" % (file,line,msg))
# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
def lexprobe(self):
# Determine the token type for identifiers
self.lexer.input("identifier")
tok = self.lexer.token()
if not tok or tok.value != "identifier":
print("Couldn't determine identifier type")
else:
self.t_ID = tok.type
# Determine the token type for integers
self.lexer.input("12345")
tok = self.lexer.token()
if not tok or int(tok.value) != 12345:
print("Couldn't determine integer type")
else:
self.t_INTEGER = tok.type
self.t_INTEGER_TYPE = type(tok.value)
# Determine the token type for strings enclosed in double quotes
self.lexer.input("\"filename\"")
tok = self.lexer.token()
if not tok or tok.value != "\"filename\"":
print("Couldn't determine string type")
else:
self.t_STRING = tok.type
# Determine the token type for whitespace--if any
self.lexer.input(" ")
tok = self.lexer.token()
if not tok or tok.value != " ":
self.t_SPACE = None
else:
self.t_SPACE = tok.type
# Determine the token type for newlines
self.lexer.input("\n")
tok = self.lexer.token()
if not tok or tok.value != "\n":
self.t_NEWLINE = None
print("Couldn't determine token for newlines")
else:
self.t_NEWLINE = tok.type
self.t_WS = (self.t_SPACE, self.t_NEWLINE)
# Check for other characters used by the preprocessor
chars = [ '<','>','#','##','\\','(',')',',','.']
for c in chars:
self.lexer.input(c)
tok = self.lexer.token()
if not tok or tok.value != c:
print("Unable to lex '%s' required for preprocessor" % c)
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
self.path.append(path)
# ----------------------------------------------------------------------
# group_lines()
#
# Given an input string, this function splits it into lines. Trailing whitespace
# is removed. Any line ending with \ is grouped with the next line. This
# function forms the lowest level of the preprocessor---grouping input text into
# a line-by-line format.
# ----------------------------------------------------------------------
def group_lines(self,input):
lex = self.lexer.clone()
lines = [x.rstrip() for x in input.splitlines()]
for i in xrange(len(lines)):
j = i+1
while lines[i].endswith('\\') and (j < len(lines)):
lines[i] = lines[i][:-1]+lines[j]
lines[j] = ""
j += 1
input = "\n".join(lines)
lex.input(input)
lex.lineno = 1
current_line = []
while True:
tok = lex.token()
if not tok:
break
current_line.append(tok)
if tok.type in self.t_WS and '\n' in tok.value:
yield current_line
current_line = []
if current_line:
yield current_line
# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
def tokenstrip(self,tokens):
i = 0
while i < len(tokens) and tokens[i].type in self.t_WS:
i += 1
del tokens[:i]
i = len(tokens)-1
while i >= 0 and tokens[i].type in self.t_WS:
i -= 1
del tokens[i+1:]
return tokens
# ----------------------------------------------------------------------
# collect_args()
#
# Collects comma separated arguments from a list of tokens. The arguments
# must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions)
# where tokencount is the number of tokens consumed, args is a list of arguments,
# and positions is a list of integers containing the starting index of each
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
# This function properly handles nested parenthesis and commas---these do not
# define new arguments.
# ----------------------------------------------------------------------
def collect_args(self,tokenlist):
args = []
positions = []
current_arg = []
nesting = 1
tokenlen = len(tokenlist)
# Search for the opening '('.
i = 0
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
i += 1
if (i < tokenlen) and (tokenlist[i].value == '('):
positions.append(i+1)
else:
self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
return 0, [], []
i += 1
while i < tokenlen:
t = tokenlist[i]
if t.value == '(':
current_arg.append(t)
nesting += 1
elif t.value == ')':
nesting -= 1
if nesting == 0:
if current_arg:
args.append(self.tokenstrip(current_arg))
positions.append(i)
return i+1,args,positions
current_arg.append(t)
elif t.value == ',' and nesting == 1:
args.append(self.tokenstrip(current_arg))
positions.append(i+1)
current_arg = []
else:
current_arg.append(t)
i += 1
# Missing end argument
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
return 0, [],[]
# ----------------------------------------------------------------------
# macro_prescan()
#
# Examine the macro value (token sequence) and identify patch points
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
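# Example (sketch): for '#define STR(x) #x' the argument occurrence is recorded
# in str_patch (stringification); for '#define CAT(a,b) a##b' both arguments are
# recorded in patch with type 'c' (concatenation, left unexpanded); a plain
# reference such as the 'x' in '#define TWICE(x) ((x) + (x))' is recorded with
# type 'e' (normal, macro-expanded substitution).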
def macro_prescan(self,macro):
macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
while i < len(macro.value):
if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
argnum = macro.arglist.index(macro.value[i].value)
# Conversion of argument to a string
if i > 0 and macro.value[i-1].value == '#':
macro.value[i] = copy.copy(macro.value[i])
macro.value[i].type = self.t_STRING
del macro.value[i-1]
macro.str_patch.append((argnum,i-1))
continue
# Concatenation
elif (i > 0 and macro.value[i-1].value == '##'):
macro.patch.append(('c',argnum,i-1))
del macro.value[i-1]
continue
elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
macro.patch.append(('c',argnum,i))
i += 1
continue
# Standard expansion
else:
macro.patch.append(('e',argnum,i))
elif macro.value[i].value == '##':
if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
(macro.value[i+1].value == macro.vararg):
macro.var_comma_patch.append(i-1)
i += 1
macro.patch.sort(key=lambda x: x[2],reverse=True)
# ----------------------------------------------------------------------
# macro_expand_args()
#
# Given a Macro and a list of arguments (each a token list), this method
# returns an expanded version of the macro. The return value is a token sequence
# representing the replacement macro tokens.
# ----------------------------------------------------------------------
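# Example (sketch): expanding TWICE(1 + 2) for '#define TWICE(x) ((x) + (x))'
# copies the replacement tokens, macro-expands the argument '1 + 2' once
# (cached in 'expanded' below) and splices it over both 'e' patch points,
# yielding the tokens of '((1 + 2) + (1 + 2))'.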
def macro_expand_args(self,macro,args):
# Make a copy of the macro token sequence
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
rep[i] = copy.copy(rep[i])
rep[i].value = str_expansion[argnum]
# Make the variadic macro comma patch. If the variadic macro argument is empty,
# we get rid of the comma that precedes it in the replacement sequence.
comma_patch = False
if macro.variadic and not args[-1]:
for i in macro.var_comma_patch:
rep[i] = None
comma_patch = True
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
if ptype == 'c':
rep[i:i+1] = args[argnum]
# Normal expansion. Argument is macro expanded first
elif ptype == 'e':
if argnum not in expanded:
expanded[argnum] = self.expand_macros(args[argnum])
rep[i:i+1] = expanded[argnum]
# Get rid of removed comma if necessary
if comma_patch:
rep = [_i for _i in rep if _i]
return rep
# ----------------------------------------------------------------------
# expand_macros()
#
# Given a list of tokens, this function performs macro expansion.
# The expanded argument is a dictionary that contains macros already
# expanded. This is used to prevent infinite recursion.
# ----------------------------------------------------------------------
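# Example (sketch): with '#define A B' and '#define B A', expanding the token
# 'A' rewrites it to 'B' and recurses; the inner 'A' is left alone because 'A'
# is still present in 'expanded' at that depth, so the mutual recursion
# terminates instead of looping forever.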
def expand_macros(self,tokens,expanded=None):
if expanded is None:
expanded = {}
i = 0
while i < len(tokens):
t = tokens[i]
if t.type == self.t_ID:
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
m = self.macros[t.value]
if not m.arglist:
# A simple macro
ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
for e in ex:
e.lineno = t.lineno
tokens[i:i+1] = ex
i += len(ex)
else:
# A macro with arguments
j = i + 1
while j < len(tokens) and tokens[j].type in self.t_WS:
j += 1
if j < len(tokens) and tokens[j].value == '(':
tokcount,args,positions = self.collect_args(tokens[j:])
if not m.variadic and len(args) != len(m.arglist):
self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
i = j + tokcount
elif m.variadic and len(args) < len(m.arglist)-1:
if len(m.arglist) > 2:
self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
else:
self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
i = j + tokcount
else:
if m.variadic:
if len(args) == len(m.arglist)-1:
args.append([])
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
for r in rep:
r.lineno = t.lineno
tokens[i:j+tokcount] = rep
i += len(rep)
del expanded[t.value]
continue
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
i += 1
return tokens
# ----------------------------------------------------------------------
# evalexpr()
#
# Evaluate an expression token sequence as a preprocessor integral expression
# (used for #if/#elif).
# ----------------------------------------------------------------------
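# Example (sketch): for '#if defined(FOO) && BAR > 1' with FOO defined and BAR
# unknown, 'defined(FOO)' is rewritten to the integer 1L, the remaining
# identifier BAR collapses to 0L, and '&&'/'||'/'!' are mapped to Python's
# 'and'/'or'/'not' before the resulting string is eval()'d -- here giving a
# false result, so the #if branch is disabled.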
def evalexpr(self,tokens):
# tokens = tokenize(line)
# Search for defined macros
i = 0
while i < len(tokens):
if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
j = i + 1
needparen = False
result = "0L"
while j < len(tokens):
if tokens[j].type in self.t_WS:
j += 1
continue
elif tokens[j].type == self.t_ID:
if tokens[j].value in self.macros:
result = "1L"
else:
result = "0L"
if not needparen: break
elif tokens[j].value == '(':
needparen = True
elif tokens[j].value == ')':
break
else:
self.error(self.source,tokens[i].lineno,"Malformed defined()")
j += 1
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE(result)
del tokens[i+1:j+1]
i += 1
tokens = self.expand_macros(tokens)
for i,t in enumerate(tokens):
if t.type == self.t_ID:
tokens[i] = copy.copy(t)
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE("0L")
elif t.type == self.t_INTEGER:
tokens[i] = copy.copy(t)
# Strip off any trailing suffixes
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]
expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
expr = expr.replace("!"," not ")
try:
result = eval(expr)
except StandardError:
self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
result = 0
return result
# ----------------------------------------------------------------------
# parsegen()
#
# Parse an input string.
# ----------------------------------------------------------------------
def parsegen(self,input,source=None):
# Replace trigraph sequences
t = trigraph(input)
lines = self.group_lines(t)
if not source:
source = ""
self.define("__FILE__ \"%s\"" % source)
self.source = source
chunk = []
enable = True
iftrigger = False
ifstack = []
for x in lines:
for i,tok in enumerate(x):
if tok.type not in self.t_WS: break
if tok.value == '#':
# Preprocessor directive
for tok in x:
if tok.type in self.t_WS and '\n' in tok.value:
chunk.append(tok)
dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
args = self.tokenstrip(dirtokens[1:])
else:
name = ""
args = []
if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.define(args)
elif name == 'include':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
oldfile = self.macros['__FILE__']
for tok in self.include(args):
yield tok
self.macros['__FILE__'] = oldfile
self.source = source
elif name == 'undef':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.undef(args)
elif name == 'ifdef':
ifstack.append((enable,iftrigger))
if enable:
if not args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'ifndef':
ifstack.append((enable,iftrigger))
if enable:
if args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'if':
ifstack.append((enable,iftrigger))
if enable:
result = self.evalexpr(args)
if not result:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'elif':
if ifstack:
if ifstack[-1][0]: # We only pay attention if outer "if" allows this
if enable: # If already true, we flip enable False
enable = False
elif not iftrigger: # If False, but not triggered yet, we'll check expression
result = self.evalexpr(args)
if result:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
elif name == 'else':
if ifstack:
if ifstack[-1][0]:
if enable:
enable = False
elif not iftrigger:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
elif name == 'endif':
if ifstack:
enable,iftrigger = ifstack.pop()
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
else:
# Unknown preprocessor directive
pass
else:
# Normal text
if enable:
chunk.extend(x)
for tok in self.expand_macros(chunk):
yield tok
chunk = []
# ----------------------------------------------------------------------
# include()
#
# Implementation of file-inclusion
# ----------------------------------------------------------------------
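# Example (sketch): '#include <foo.h>' searches the directories registered with
# add_path() before the temporary directories picked up from previously included
# files, while '#include "foo.h"' tries those temporary directories first; each
# file found is parsed recursively through parsegen().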
def include(self,tokens):
# Try to extract the filename and then process an include file
if not tokens:
return
if tokens:
if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
tokens = self.expand_macros(tokens)
if tokens[0].value == '<':
# Include <...>
i = 1
while i < len(tokens):
if tokens[i].value == '>':
break
i += 1
else:
print("Malformed #include <...>")
return
filename = "".join([x.value for x in tokens[1:i]])
path = self.path + [""] + self.temp_path
elif tokens[0].type == self.t_STRING:
filename = tokens[0].value[1:-1]
path = self.temp_path + [""] + self.path
else:
print("Malformed #include statement")
return
for p in path:
iname = os.path.join(p,filename)
try:
data = open(iname,"r").read()
dname = os.path.dirname(iname)
if dname:
self.temp_path.insert(0,dname)
for tok in self.parsegen(data,filename):
yield tok
if dname:
del self.temp_path[0]
break
except IOError:
pass
else:
print("Couldn't find '%s'" % filename)
# ----------------------------------------------------------------------
# define()
#
# Define a new macro
# ----------------------------------------------------------------------
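# Example (sketch, assuming 'p' is a Preprocessor instance): both object-like and
# function-like macros come through here, e.g.
#
#     p.define("ANSWER 42")
#     p.define("TWICE(x) ((x) + (x))")
#
# the first becomes a simple token substitution, the second is run through
# macro_prescan() so its argument positions can be patched at expansion time.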
def define(self,tokens):
if isinstance(tokens,(str,unicode)):
tokens = self.tokenize(tokens)
linetok = tokens
try:
name = linetok[0]
if len(linetok) > 1:
mtype = linetok[1]
else:
mtype = None
if not mtype:
m = Macro(name.value,[])
self.macros[name.value] = m
elif mtype.type in self.t_WS:
# A normal macro
m = Macro(name.value,self.tokenstrip(linetok[2:]))
self.macros[name.value] = m
elif mtype.value == '(':
# A macro with arguments
tokcount, args, positions = self.collect_args(linetok[1:])
variadic = False
for a in args:
if variadic:
print("No more arguments may follow a variadic argument")
break
astr = "".join([str(_i.value) for _i in a])
if astr == "...":
variadic = True
a[0].type = self.t_ID
a[0].value = '__VA_ARGS__'
variadic = True
del a[1:]
continue
elif astr[-3:] == "..." and a[0].type == self.t_ID:
variadic = True
del a[1:]
# If, for some reason, "." is part of the identifier, strip off the name for the purposes
# of macro expansion
if a[0].value[-3:] == '...':
a[0].value = a[0].value[:-3]
continue
if len(a) > 1 or a[0].type != self.t_ID:
print("Invalid macro argument")
break
else:
mvalue = self.tokenstrip(linetok[1+tokcount:])
i = 0
while i < len(mvalue):
if i+1 < len(mvalue):
if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
del mvalue[i]
continue
elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
del mvalue[i+1]
i += 1
m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
self.macro_prescan(m)
self.macros[name.value] = m
else:
print("Bad macro definition")
except LookupError:
print("Bad macro definition")
# ----------------------------------------------------------------------
# undef()
#
# Undefine a macro
# ----------------------------------------------------------------------
def undef(self,tokens):
id = tokens[0].value
try:
del self.macros[id]
except LookupError:
pass
# ----------------------------------------------------------------------
# parse()
#
# Parse input text.
# ----------------------------------------------------------------------
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)
# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
def token(self):
try:
while True:
tok = next(self.parser)
if tok.type not in self.ignore: return tok
except StopIteration:
self.parser = None
return None
if __name__ == '__main__':
import ply.lex as lex
lexer = lex.lex()
# Run a preprocessor
import sys
f = open(sys.argv[1])
input = f.read()
p = Preprocessor(lexer)
p.parse(input,sys.argv[1])
while True:
tok = p.token()
if not tok: break
print(p.source, tok)
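# Example (sketch): the preprocessor can also be driven from a string rather than
# a file, assuming lex.lex() has picked up this module's token definitions as in
# the block above:
#
#     p = Preprocessor(lexer)
#     p.parse("#define PI 3.14159\nconst double pi = PI;\n", "<string>")
#     while True:
#         tok = p.token()
#         if not tok: break
#         sys.stdout.write(tok.value)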
# ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++. This file is
# meant to be used as a library in other tokenizers.
# ----------------------------------------------------------------------
# Reserved words
tokens = [
# Literals (identifier, integer constant, float constant, string constant, char const)
'ID', 'TYPEID', 'ICONST', 'FCONST', 'SCONST', 'CCONST',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
# Increment/decrement (++,--)
'PLUSPLUS', 'MINUSMINUS',
# Structure dereference (->)
'ARROW',
# Ternary operator (?)
'TERNARY',
# Delimiters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'SEMI', 'COLON',
# Ellipsis (...)
'ELLIPSIS',
]
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='
# Increment/decrement
t_INCREMENT = r'\+\+'
t_DECREMENT = r'--'
# ->
t_ARROW = r'->'
# ?
t_TERNARY = r'\?'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
# Integer literal
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_STRING = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comment (C-Style)
def t_COMMENT(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
return t
# Comment (C++-Style)
def t_CPPCOMMENT(t):
r'//.*\n'
t.lexer.lineno += 1
return t
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = "3.4"
__tabversion__ = "3.2" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def critical(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
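# Example (sketch): the runtime methods listed above are used roughly as
#
#     lexer.input("3 + 4 * x")
#     while True:
#         tok = lexer.token()
#         if not tok:
#             break
#         print(tok.type, tok.value, tok.lineno, tok.lexpos)
#
# where 'lexer' is the object returned by lex() further below.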
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = { }
for key, ritem in list(self.lexstatere.items()):
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object,f[0].__name__),f[1]))
newre.append((cre,newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = { }
for key, ef in list(self.lexstateerrorf.items()):
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile,outputdir=""):
if isinstance(tabfile,types.ModuleType):
return
basetabfilename = tabfile.split(".")[-1]
filename = os.path.join(outputdir,basetabfilename)+".py"
tf = open(filename,"w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
tf.write("_tabversion = %s\n" % repr(__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
# Collect all functions in the initial state
initial = self.lexstatere["INITIAL"]
initialfuncs = []
for part in initial:
for f in part[1]:
if f and f[0]:
initialfuncs.append(f)
for key, lre in list(self.lexstatere.items()):
titem = []
for i in range(len(lre)):
titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
taberr = { }
for key, ef in list(self.lexstateerrorf.items()):
if ef:
taberr[key] = ef.__name__
else:
taberr[key] = None
tf.write("_lexstateerrorf = %s\n" % repr(taberr))
tf.close()
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
if isinstance(tabfile,types.ModuleType):
lextab = tabfile
else:
if sys.version_info[0] < 3:
exec("import %s as lextab" % tabfile)
else:
env = { }
exec("import %s as lextab" % tabfile, env,env)
lextab = env['lextab']
if getattr(lextab,"_tabversion","0.0") != __version__:
raise ImportError("Inconsistent PLY version")
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = { }
self.lexstateretext = { }
for key,lre in list(lextab._lexstatere.items()):
titem = []
txtitem = []
for i in range(len(lre)):
titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
txtitem.append(lre[i][0])
self.lexstatere[key] = titem
self.lexstateretext[key] = txtitem
self.lexstateerrorf = { }
for key,ef in list(lextab._lexstateerrorf.items()):
self.lexstateerrorf[key] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c,StringTypes):
raise ValueError("Expected a string")
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
if not state in self.lexstatere:
raise ValueError("Undefined state")
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state,"")
self.lexerrorf = self.lexstateerrorf.get(state,None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self,state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self,n):
self.lexpos += n
# ------------------------------------------------------------
# token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre,lexindexfunc in self.lexre:
m = lexre.match(lexdata,lexpos)
if not m: continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func,tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
# Every function must return a token; if it returns nothing, we just move on to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if not newtok.type in self.lextokens:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func_code(func).co_filename, func_code(func).co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok: continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError("No input string given with input()")
return None
# Iterator interface
def __iter__(self):
return self
def __next__(self):
t = self.token()
if t is None:
raise StopIteration
return t
next = __next__  # Python 2 iterator protocol alias
# -----------------------------------------------------------------------------
# === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the lex() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
lexindexnames = lexindexfunc[:]
for f,i in list(lexre.groupindex.items()):
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex],[lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self,ldict,log=None,reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = { 'INITIAL' : 'inclusive'}
self.files = {}
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'",n)
self.error = 1
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get("literals","")
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c,StringTypes) or len(c) > 1:
self.log.error("Invalid literal %s. Must be a single character", repr(c))
self.error = 1
continue
except TypeError:
self.log.error("Invalid literals specification. literals must be a sequence of characters")
self.error = 1
def get_states(self):
self.states = self.ldict.get("states",None)
# Build statemap
if self.states:
if not isinstance(self.states,(tuple,list)):
self.log.error("states must be defined as a tuple or list")
self.error = 1
else:
for s in self.states:
if not isinstance(s,tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
self.error = 1
continue
name, statetype = s
if not isinstance(name,StringTypes):
self.log.error("State name %s must be a string", repr(name))
self.error = 1
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
self.error = 1
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined",name)
self.error = 1
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
# Now build up a list of functions and a list of strings
self.toknames = { } # Mapping of symbols to token names
self.funcsym = { } # Symbols defined as functions
self.strsym = { } # Symbols defined as strings
self.ignore = { } # Ignore strings by state
self.errorf = { } # Error functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error("No rules of the form t_rulename are defined")
self.error = 1
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f,self.stateinfo)
self.toknames[f] = tokname
if hasattr(t,"__call__"):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'ignore':
line = func_code(t).co_firstlineno
file = func_code(t).co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
self.error = 1
else:
for s in states:
self.funcsym[s].append((f,t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if "\\" in t:
self.log.warning("%s contains a literal backslash '\\'",f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = 1
else:
for s in states:
self.strsym[s].append((f,t))
else:
self.log.error("%s not defined as a function or string", f)
self.error = 1
# Sort the functions by line number
for f in list(self.funcsym.values()):
if sys.version_info[0] < 3:
f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
else:
# Python 3.0
f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
# Sort the strings by regular expression length
for s in list(self.strsym.values()):
if sys.version_info[0] < 3:
s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
else:
# Python 3.0
s.sort(key=lambda x: len(x[1]),reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
continue
if not f.__doc__:
self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
if c.match(""):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
if '#' in f.__doc__:
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
self.error = 1
# Validate all rules defined by strings
for name,r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = 1
continue
if not tokname in self.tokens and tokname.find("ignore_") < 0:
self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
if (c.match("")):
self.log.error("Regular expression for rule '%s' matches empty string",name)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
self.error = 1
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'",state)
self.error = 1
# Validate the error function
efunc = self.errorf.get(state,None)
if efunc:
f = efunc
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
for f in self.files:
self.validate_file(f)
# -----------------------------------------------------------------------------
# validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the lexer specification file. This is done using a simple regular expression
# match on each line in the given file.
# -----------------------------------------------------------------------------
def validate_file(self,filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return # Couldn't find the file. Don't worry about it
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
self.error = 1
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
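# Example (sketch): a module that defines 'tokens', its 't_' rules and a
# 't_error' function builds its lexer with
#
#     import ply.lex as lex
#     lexer = lex.lex()                  # reflect over the calling module
#
# or, alternatively, lex.lex(module=some_module), lex.lex(object=some_instance),
# or lex.lex(optimize=1, lextab="mylextab") to cache the generated tables;
# reflection and validation are handled by LexerReflect above.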
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
global lexer
ldict = None
stateinfo = { 'INITIAL' : 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token,input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object: module = object
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = dict(_items)
else:
ldict = get_caller_module_dict(2)
# Collect parser information from the dictionary
linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab,ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info("lex: tokens = %r", linfo.tokens)
debuglog.info("lex: literals = %r", linfo.literals)
debuglog.info("lex: states = %r", linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = { }
for n in linfo.tokens:
lexobj.lextokens[n] = 1
# Get literals specification
if isinstance(linfo.literals,(list,tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = { }
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
# Now add all of the simple rules
for name,r in linfo.strsym[state]:
regex_list.append("(?P<%s>%s)" % (name,r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i in range(len(re_text)):
debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state,stype in list(stateinfo.items()):
if state != "INITIAL" and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
if not lexobj.lexerrorf:
errorlog.warning("No t_error rule is defined")
# Check state information for ignore and error rules
for s,stype in list(stateinfo.items()):
if stype == 'exclusive':
if not s in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if not s in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if not s in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
if not s in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get("INITIAL","")
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
lexobj.writetab(lextab,outputdir)
return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write("Reading from standard input (type EOF to end):\n")
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regular expression of a token
# function when it cannot conveniently be given as a docstring.
# -----------------------------------------------------------------------------
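# Example (sketch):
#
#     digit  = r'[0-9]'
#     number = digit + r'+'
#
#     @TOKEN(number)
#     def t_NUMBER(t):
#         t.value = int(t.value)
#         return t
#
# which is equivalent to setting t_NUMBER.__doc__ = number by hand.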
def TOKEN(r):
def set_doc(f):
if hasattr(r,"__call__"):
f.__doc__ = r.__doc__
else:
f.__doc__ = r
return f
return set_doc
# Alternative spelling of the TOKEN decorator
Token = TOKEN
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = "3.4"
__tabversion__ = "3.2" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def critical(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = { }
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object,f[0].__name__),f[1]))
newre.append((cre,newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = { }
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile,outputdir=""):
if isinstance(tabfile,types.ModuleType):
return
basetabfilename = tabfile.split(".")[-1]
filename = os.path.join(outputdir,basetabfilename)+".py"
tf = open(filename,"w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
tf.write("_tabversion = %s\n" % repr(__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
# Collect all functions in the initial state
initial = self.lexstatere["INITIAL"]
initialfuncs = []
for part in initial:
for f in part[1]:
if f and f[0]:
initialfuncs.append(f)
for key, lre in self.lexstatere.items():
titem = []
for i in range(len(lre)):
titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
taberr = { }
for key, ef in self.lexstateerrorf.items():
if ef:
taberr[key] = ef.__name__
else:
taberr[key] = None
tf.write("_lexstateerrorf = %s\n" % repr(taberr))
tf.close()
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
if isinstance(tabfile,types.ModuleType):
lextab = tabfile
else:
if sys.version_info[0] < 3:
exec("import %s as lextab" % tabfile)
else:
env = { }
exec("import %s as lextab" % tabfile, env,env)
lextab = env['lextab']
if getattr(lextab,"_tabversion","0.0") != __version__:
raise ImportError("Inconsistent PLY version")
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = { }
self.lexstateretext = { }
for key,lre in lextab._lexstatere.items():
titem = []
txtitem = []
for i in range(len(lre)):
titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
txtitem.append(lre[i][0])
self.lexstatere[key] = titem
self.lexstateretext[key] = txtitem
self.lexstateerrorf = { }
for key,ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[key] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c,StringTypes):
raise ValueError("Expected a string")
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
if not state in self.lexstatere:
raise ValueError("Undefined state")
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state,"")
self.lexerrorf = self.lexstateerrorf.get(state,None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self,state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self,n):
self.lexpos += n
# ------------------------------------------------------------
# token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre,lexindexfunc in self.lexre:
m = lexre.match(lexdata,lexpos)
if not m: continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func,tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
# Every function must return a token; if it returns nothing, we just move on to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if not newtok.type in self.lextokens:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func_code(func).co_filename, func_code(func).co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok: continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError("No input string given with input()")
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ====
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the lex() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
lexindexnames = lexindexfunc[:]
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex],[lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self,ldict,log=None,reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = { 'INITIAL' : 'inclusive'}
self.files = {}
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'",n)
self.error = 1
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get("literals","")
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c,StringTypes) or len(c) > 1:
self.log.error("Invalid literal %s. Must be a single character", repr(c))
self.error = 1
continue
except TypeError:
self.log.error("Invalid literals specification. literals must be a sequence of characters")
self.error = 1
def get_states(self):
self.states = self.ldict.get("states",None)
# Build statemap
if self.states:
if not isinstance(self.states,(tuple,list)):
self.log.error("states must be defined as a tuple or list")
self.error = 1
else:
for s in self.states:
if not isinstance(s,tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
self.error = 1
continue
name, statetype = s
if not isinstance(name,StringTypes):
self.log.error("State name %s must be a string", repr(name))
self.error = 1
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
self.error = 1
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined",name)
self.error = 1
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
# Now build up a list of functions and a list of strings
self.toknames = { } # Mapping of symbols to token names
self.funcsym = { } # Symbols defined as functions
self.strsym = { } # Symbols defined as strings
self.ignore = { } # Ignore strings by state
self.errorf = { } # Error functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error("No rules of the form t_rulename are defined")
self.error = 1
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f,self.stateinfo)
self.toknames[f] = tokname
if hasattr(t,"__call__"):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'ignore':
line = func_code(t).co_firstlineno
file = func_code(t).co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
self.error = 1
else:
for s in states:
self.funcsym[s].append((f,t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if "\\" in t:
self.log.warning("%s contains a literal backslash '\\'",f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = 1
else:
for s in states:
self.strsym[s].append((f,t))
else:
self.log.error("%s not defined as a function or string", f)
self.error = 1
# Sort the functions by line number
for f in self.funcsym.values():
if sys.version_info[0] < 3:
f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
else:
# Python 3.0
f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
if sys.version_info[0] < 3:
s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
else:
# Python 3.0
s.sort(key=lambda x: len(x[1]),reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
continue
if not f.__doc__:
self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
if c.match(""):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
if '#' in f.__doc__:
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
self.error = 1
# Validate all rules defined by strings
for name,r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = 1
continue
if not tokname in self.tokens and tokname.find("ignore_") < 0:
self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
if (c.match("")):
self.log.error("Regular expression for rule '%s' matches empty string",name)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
self.error = 1
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'",state)
self.error = 1
# Validate the error function
efunc = self.errorf.get(state,None)
if efunc:
f = efunc
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
for f in self.files:
self.validate_file(f)
# -----------------------------------------------------------------------------
# validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the given file.
# -----------------------------------------------------------------------------
def validate_file(self,filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return # Couldn't find the file. Don't worry about it
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
self.error = 1
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
global lexer
ldict = None
stateinfo = { 'INITIAL' : 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token,input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object: module = object
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = dict(_items)
else:
ldict = get_caller_module_dict(2)
# Collect parser information from the dictionary
linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab,ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info("lex: tokens = %r", linfo.tokens)
debuglog.info("lex: literals = %r", linfo.literals)
debuglog.info("lex: states = %r", linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = { }
for n in linfo.tokens:
lexobj.lextokens[n] = 1
# Get literals specification
if isinstance(linfo.literals,(list,tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = { }
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
# Now add all of the simple rules
for name,r in linfo.strsym[state]:
regex_list.append("(?P<%s>%s)" % (name,r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i in range(len(re_text)):
debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state,stype in stateinfo.items():
if state != "INITIAL" and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
if not lexobj.lexerrorf:
errorlog.warning("No t_error rule is defined")
# Check state information for ignore and error rules
for s,stype in stateinfo.items():
if stype == 'exclusive':
if not s in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if not s in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if not s in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
if not s in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get("INITIAL","")
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
lexobj.writetab(lextab,outputdir)
return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write("Reading from standard input (type EOF to end):\n")
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_doc(f):
if hasattr(r,"__call__"):
f.__doc__ = r.__doc__
else:
f.__doc__ = r
return f
return set_doc
# Alternative spelling of the TOKEN decorator
Token = TOKEN
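The file above is a bundled copy of the PLY lex module. As a quick orientation for readers of this commit, here is a minimal, illustrative sketch of how a lexer is normally defined against this API; the token names and regular expressions are invented for the example, and the import assumes the module is importable as ply.lex (adjust it to wherever the bundled copy actually lives in the repository).

# Minimal PLY lexer sketch (illustrative only; adjust the import path as needed).
import ply.lex as lex

tokens = ('NUMBER', 'PLUS', 'WORD')     # token list checked by LexerReflect.get_tokens()

t_PLUS   = r'\+'                        # simple rules are plain strings
t_ignore = ' \t'                        # characters skipped by the token() fast path

def t_NUMBER(t):                        # function rules carry their regex in the docstring
    r'\d+'
    t.value = int(t.value)
    return t

def t_WORD(t):
    r'[A-Za-z]+'
    return t

def t_error(t):                         # called by token() when nothing matches
    print("Illegal character %r" % t.value[0])
    t.lexer.skip(1)

lexer = lex.lex()                       # runs LexerReflect and _form_master_re defined above
lexer.input("12 plus 30")
for tok in lexer:                       # uses the Lexer __iter__/next interface
    print(tok.type, tok.value)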
#!/usr/bin/env perl
# $Id: segmenteur.pl 5184 2013-05-04 18:23:11Z magistry $
# General principle
# -----------------
# Processing is done in two stages
# 1. tokenization (blanks are inserted or removed:
# afterwards, a blank is a token boundary)
# This is not done if -no_sw is given
# 2. segmentation (tokens made up of periods or
# other punctuation marks are candidate
# sentence boundaries)
# The -no_s option runs only stage 1
# (sentence boundaries remain plain blanks)
# Specifics:
# - the file passed as a parameter (-af=xxx) is used
# so that periods ending known abbreviations are not
# handled as the general case
# - we try to be clever about which periods are
# sentence boundaries and which are not
# - we try not to put a sentence boundary in the
# middle of a well-balanced quotation
# - the -p=[rp] option favors either recall
# (sentences may freely start with a lowercase
# letter) or precision (sentences do not start
# with a lowercase letter)
# - the input encoding is read from the 'encoding'
# file in the lexicon directory for the given
# language (utf8 by default)
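#
# Usage sketch (added for orientation; the file names below are placeholders,
# not part of this repository -- see the option parsing further down for the
# full list of switches):
#   cat raw.txt | perl segmenteur.pl -l fr -af=abbrev_list.txt -p=r > tokenized.txt
# The script reads UTF-8 text on STDIN and writes tokenized / sentence-segmented
# text to STDOUT; -p=r favours recall for sentence boundaries, -p=p precision.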
use utf8;
use Encode;
$| = 1;
$lang = "fr";
$toksent=1;
$no_af=0;
$no_sw=-1;
$cut_on_apos=0;
$cut_on_hyphen=0;
$sent_bound="\n";
$qsent_bound="_SENT_BOUND";
$print_par_bound = 0;
$weak_sbound = 1;
$affixes = 0; # since this is normally handled by text2dag
#$split_before_ne=0;
my $best_recall=0;
my %dict;
my $has_dict = 0;
my $alexinadir = "/usr/local/share/alexina"; # default value
binmode STDIN, ":utf8";
binmode STDERR, ":utf8";
binmode STDOUT, ":utf8";
while (1) {
$_=shift;
if (/^$/) {last;}
elsif (/^-s$/ || /^-split-sentences?$/i) {$toksent=1;}
elsif (/^-no_s$/ || /^-no_split[_-]sentences?$/i) {$toksent=0;}
elsif (/^-sb$/ || /^-sentences?-boundary$/i) {$sent_bound=shift;} elsif (/^-sb=(.*)$/ || /^-sentences?-boundary=(.*)$/i) {$sent_bound=$1;}
elsif (/^-qsb$/ || /^-quotes?-sentences?-boundary$/i) {$qsent_bound=shift;} elsif (/^-qsb=(.*)$/ || /^-quotes?-sentences?-boundary=(.*)$/i) {$qsent_bound=$1;}
elsif (/^-no_qsb$/ || /^-no[_-]quotes?-sentences?-boundary$/i) {$qsent_bound="";}
elsif (/^-af$/ || /^-abbrev-file$/i) {$abrfilename=shift;} elsif (/^-af=(.*)$/ || /^-abbrev-file=(.*)$/i) {$abrfilename=$1;}
elsif (/^-no_af$/ || /^-no_abbrev-file$/i) {$no_af=1; $abrfilename=""}
elsif (/^-sw$/ || /^-no_split-words$/i) {$no_sw=0;}
elsif (/^-no_sw$/ || /^-no_split-words$/i) {$no_sw=1;}
elsif (/^-ca$/ || /^-cut_on_apos$/i) {$cut_on_apos=1;}
elsif (/^-ch$/ || /^-cut_on_hypen$/i) {$cut_on_hyphen=1;}
elsif (/^-a$/ || /^-affixes$/i) {$affixes=1;}
elsif (/^-r$/ || /^-p=r$/ || /^-best_sbound_recall$/i) {$initialclass=$l; $best_recall=1} # segment a lot
elsif (/^-n$/ || /^-p=p$/ || /^-best_sbound_precision$/i) {$initialclass=$maj;} # segment normally
elsif (/^-p$/) {$initialclass=$maj; $weak_sbound = 0} # segment less
elsif (/^-ppb$/ || /^-print_par_bound$/i) {$print_par_bound = 1;}
elsif (/^-kbl$/ || /^-keep_blank_lines$/i) {$keep_blank_lines = 1;}
elsif (/^-alexinadir$/) {$alexinadir = shift;} elsif (/^-alexinadir=(.*)$/) {$alexinadir = $1;}
elsif (/^-l$/ || /^-lang$/i) {$lang=shift;} elsif (/^-l=(.*)$/ || /^-lang=(.*)$/i) {$lang=$1;}
}
if ($lang =~ /^(fr|en|it|es|pt|sv|no|de)/) {
$min=qr/(?:[a-zæœàåâäãéêèëîïøöôùûüÿçóúíáòì])/;
$maj=qr/(?:[A-ZÀÅÃÉÈÊËÂÄÔÖÛÜÇÓÚÍÁÒØÌÆŒ])/;
$l=qr/(?:[æœàåâäãéêèëîïöôùûüÿçøóúíáòìa-zA-ZÀÅÃÉÈÊËÂÄÔÖÛÜÇÓÚÍÁÒØÌÆŒ])/;
$nonl=qr/(?:[^a-zæœàåâäãéêèëîïöøôùûüÿçóúíáòìA-ZÀÅÃÉÈÊËÂÄÔÖÛÜÇÓÚÍÁÒØÌÆŒ])/;
} elsif ($lang =~ /^(pl|cz|sk|ro|sl|hr|sr|sc|bn|tr|fa|ckb)$/) {
$min=qr/(?:[a-záäąćčďéęěëíĺľłńňóôöŕřśšťúůüýźż])/;
$maj=qr/(?:[A-ZÁÄĄĆČĎÉĘĚËÍĹŁĽŃŇÓÔÖŔŘŚŠŤÚŮÜÝŹŻ])/;
$l=qr/(?:[a-záäąćčďéęěëíĺľłńňóôöŕřśšťúůüýźżA-ZÁÄĄĆČĎÉĘĚËÍĹŁĽŃŇÓÔÖŔŘŚŠŤÚŮÜÝŹŻ ])/;
$nonl=qr/(?:[^a-záäąćčďéęěëíĺľłńňóôöŕřśšťúůüýźżA-ZÁÄĄĆČĎÉĘĚËÍĹŁĽŃŇÓÔÖŔŘŚŠŤÚŮÜÝŹŻ ])/;
} elsif ($lang =~ /^(ru|uk|bg|bl|kk)$/) {
$min=qr/(?:[a-zабвгдежзийклмнопрстуфхцчшщэюяыьё])/;
$maj=qr/(?:[A-ZАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЭЮЯЫЬЁ])/;
$l=qr/(?:[a-zабвгдежзийклмнопрстуфхцчшщэюяыьёA-ZАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЭЮЯЫЬЁ ])/;
$nonl=qr/(?:[^a-zабвгдежзийклмнопрстуфхцчшщэюяыьёA-ZАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЭЮЯЫЬЁ ])/;
} elsif ($lang =~ /^(gr)$/) {
$min=qr/(?:[a-zα-ωάέήίόύώϊϋΐΰ])/;
$maj=qr/(?:[A-ZΑ-ΩΆΈΉΊΌΎΏΪΫ])/;
$l=qr/(?:[a-zA-Zα-ωάέήίόύώϊϋΐΰΑ-ΩΆΈΉΊΌΎΏΪΫ ])/;
$nonl=qr/(?:[^a-zA-Zα-ωάέήίόύώϊϋΐΰΑ-ΩΆΈΉΊΌΎΏΪΫ ])/;
} else {
$min=qr/(?:[a-zæœáäąãćčďéęěëíĺľłńňóôöŕřśšťúůüýźżàåâäãéêèëîïøöôùûüÿçóúíáòì])/;
$maj=qr/(?:[A-ZÁÄĄÃĆČĎÉĘĚËÍĹŁĽŃŇÓÔÖŔŘŚŠŤÚŮÜÝŹŻÀÅÃÉÈÊËÂÄÔÖÛÜÇÓÚÍÁÒØÌÆŒ])/;
$l=qr/(?:[a-zæœáäąãćčďéęěëíĺľłńňóôöŕřśšťúůüýźżàåâäãéêèëîïøöôùûüÿçóúíáòìA-ZÆŒÁÄĄÃĆČĎÉĘĚËÍĹŁĽŃŇÓÔÖŔŘŚŠŤÚŮÜÝŹŻÀÅÃÉÈÊËÂÄÔÖÛÜÇÓÚÍÁÒØÌ ])/;
$nonl=qr/(?:[^a-zæœáäąãćčďéęěëíĺľłńňóôöŕřśšťúůüýźżàåâäãéêèëîïøöôùûüÿçóúíáòìA-ZÆŒÁÄĄÃĆČĎÉĘĚËÍĹŁĽŃŇÓÔÖŔŘŚŠŤÚŮÜÝŹŻÀÅÃÉÈÊËÂÄÔÖÛÜÇÓÚÍÁÒØÌ ])/;
}
$initialclass=$maj;
# if ($lang ne "" && $u8seg != 1 && -r "$alexinadir/$lang/encoding" && open ENCODING, "<$alexinadir/$lang/encoding") {
# $encoding = <ENCODING>;
# chomp($encoding);
# if ($encoding =~ /^(?:(?:iso)?-?8859-|l(?:atin)?-?)-?(\d+)$/i) {
# binmode STDIN, ":encoding(iso-8859-$1)";
# binmode STDOUT, ":encoding(iso-8859-$1)";
# }
# close ENCODING;
# }
if ($lang =~ /^(zh|tw)$/) {
for (</usr/local/share/alexina/$lang/*.lex>) {
open DICT, "<$_" || next;
binmode DICT, ":utf8";
while (<DICT>) {
chomp;
s/^#.*//;
/^(.)(.*?)\t/ || next;
$tmpdict{$1}{$2} = 1;
$has_dict = 1;
}
close DICT;
}
for $firstchar (keys %tmpdict) {
@{$dict{$firstchar}} = sort {length($b)<=>length($a)} keys %{$tmpdict{$firstchar}};
}
}
if ($no_sw == -1) {
$no_sw = 0;
# if ($lang =~ /^(ja|tw|zh|th|ko)$/) {
if ($lang =~ /^(ja|th|ko)$/) {
$no_sw = 1;
}
}
if ($lang =~ /^(fa|ckb)$/) {$weak_sbound = 2}
$sent_bound =~ s/\\n/\n/g;
$sent_bound =~ s/\\r/\r/g;
$sent_bound =~ s/\\t/\t/g;
$sent_bound =~ s/^"(.*)"$/\1/;
$qsent_bound =~ s/\\n/\n/g;
$qsent_bound =~ s/\\r/\r/g;
$qsent_bound =~ s/\\t/\t/g;
$qsent_bound =~ s/^"(.*)"$/\1/;
$qsent_bound =~ s/\\n/\n/g;
$qsent_bound =~ s/\\r/\r/g;
$qsent_bound =~ s/\\t/\t/g;
$qsent_bound =~ s/^"(.*)"$/\1/;
my $cut_on_apos_re = "";
if ($cut_on_apos) {
if ($lang eq "fr") {
$cut_on_apos_re = join('|', qw/c m n j s t aujourd d l qu puisqu lorsqu quelqu presqu prud quoiqu jusqu/);
$cut_on_apos_re = qr/(?:$cut_on_apos_re)/i;
}
}
@abr=();
@abrp=();
my $temp;
my $temp2;
if ($abrfilename=~/\w/) {
if (open (ABR,"<$abrfilename")) {
while (<ABR>) {
if ((/^\"..+\"$/ || /^[^\"].+$/) && /^[^\#]/) {
chomp;
s/_/ /g;
s/^(.*[^\.].*\.)\.$/$1\_FINABR/; # these may be abbreviations that end with a period and also end a sentence (e.g. etc.)
s/^\"//;
s/\"$//;
$rhs = $_;
$rhs_nospace = $rhs;
$rhs_nospace=~s/_FINABR//;
$rhs_nospace=~s/ //g;
$rhs_nospace2rhs{$rhs_nospace}=$rhs;
s/([\.\[\]\(\)\*\+])/\\$1/g; # escape special characters
s/^\\\. */\\\. \*/g;
s/(?<=.)\\\. */ \*\\\. \*/g;
if (s/ \*_FINABR$//) {
push(@abr_fin,$rhs);
push(@abrp_fin,$_);
} else {
s/ \*$//;
push(@abr,$rhs);
push(@abrp,$_);
}
}
}
close (ABR);
$abrp_re = join("|",sort {length($b) <=> length($a)} @abrp);
$abrp_re = qr/($abrp_re)/o;
$abrp_fin_re = join("|",sort {length($b) <=> length($a)} @abrp_fin);
$abrp_fin_re = qr/($abrp_fin_re)/o;
} else {
print STDERR "The dot-including abbreviation file $abrfilename could not be opened ($!). Ignoring such abbreviations.\n";
$no_af = 1;
}
} elsif (!$no_af) {
print STDERR "No dot-including abbreviation file given\n";
$no_af = 1;
}
$par_bound = -1;
while (<STDIN>) {
chomp;
if (/ (_XML|_MS_ANNOTATION) *$/) {
print "$_\n";
$par_bound = 0;
next;
} elsif ($par_bound == -1) {
$par_bound = 0;
} else {
$par_bound = 1;
}
if ($par_bound && $print_par_bound) {
print " _PAR_BOUND\n";
}
s/  *{/{ /g;
s/(} *[^ ]+) * / \1 /g;
s/ +  +/   /g;
s/^/ /; s/$/ /;
if (!$no_sw) {
if ($lang !~ /^(ja|zh|tw|th)/) { # if we can do the tokenization ourselves...
s/(?<=[^\.\t])\.+(\s\s+)/ \.$1/g; # if followed by two or more blanks (or TABs), period+ is a token
s/(?<=\t)\.+(\s\s+)/\.$1/g; # same
s/ +/ /g; # since the above relies on it, normalize spaces
s/\t+/\t/g;
s/ +\t/\t/g; s/\t +/\t/g;
s/(_(?:UNSPLIT|REGLUE|SPECWORD)_[^\s{]+)/\1_PROTECT_/g; # protect tokens that were already split and flagged with _UNSPLIT_ or _REGLUE_ from being segmented
s/({[^{}]+} *$l+)\+($l*)/\1__PLUS__\2/g;
if ($lang =~ /^(fr|es|it)$/) {
s/[ \(\[]\'([^ \'][^\']*?[^ \'])\'(?=[ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])/ ' \1 ' /g; # les apostrophes peuvent servir à quoter...
s/ / /g;
if ($lang eq "it") {
s/ e\'(?=[ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])/ {e'} è/g;
s/(?<=[ \/\\:;\"“”\+-])([A-Za]?[a-z]+(?:[ielsrtn]t|[eirdavtp]r|ci|al|ap|of|ol|ag))a\'(?=[ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])/{$1a'} $1à/g;
s/(?<=[ \/\\:;\"“”\+-])([A-Za]?[a-z]+(?:pi|n[dt]|bl|iv|u[ld]|gi|ab))u\'(?=[ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])/{$1u'} $1ù/g;
s/(?<=[ \/\\:;\"“”\+-])([A-Za]?[a-z]+(?:[eubfvpgmsdczlntir]))o\'(?=[ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])/{$1o'} $1ò/g;
}
unless ($lang eq "it") {
s/(?<=[^\'\s_])\'\s*/\'/g; # par défaut, on supprime les blancs suivant les guillemets sauf après " '" et "''", sauf en italien (redondant avec ci-dessus, mais sécurité... ou ci-dessus à supprimer (ou supprimer d'ici... à voir))
}
s/(?<=\s)\'(?=[^ _])/$1\' /g; # en français, espagnol et italien, les autres guillemets sont détachés
} elsif ($lang eq "en") {
s/([a-z])\'([a-z])/\1__APOS__\2/g;
# s/[ \(\[]'([^ '][^']*?[^ '])'(?=[ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])/ ' \1 ' /g; # les apostrophes peuvent servir à quoter...
$lq = s/(?<=[ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])(['`])(?=[^ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])/\1/g;
$rq_no_s = s/(?<=[^ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.Ss])'(?=[ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])/'/g;
$sq = s/(?<=[Ss])\'(?=[ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])/'/g;
if ($sq == 0 && $lq == 0 && $rq_no_s == 0) {
} elsif ($sq == 0 && $lq == $rq_no_s) {
s/(?<=[ \(\[])([\'\`])([^ \'](?:[^\'])*?[^ \'sS])\'(?=[ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])/ {\1} ` \2 ' /g; # les apostrophes peuvent servir à quoter...
} elsif ($sq == 1 && $lq == $rq_no_s) {
s/(?<=[ \(\[])(['\`])([^ '](?:[^']|[sS]')*?[^ 'sS])'(?=[ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])/ {\1} ` \2 ' /g; # les apostrophes peuvent servir à quoter...
} else {
$_ = reverse($_);
s/(?<=[ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])'([^ '](?:[^']|'[sS])*?[^ '])(['\`])(?=[ \(\[])/ ' \1 ` \}\2\{ /g; # les apostrophes peuvent servir à quoter...
$_ = reverse($_);
s/{`} ` /` /g;
}
s/__APOS__/'/g;
s/ / /g;
} elsif ($lang !~ /^(ckb|fa)/) {
s/[ \(\[]'([^ '][^']*?[^ '])'(?=[ ,;?\!:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\˝\&\`\.])/ ' \1 ' /g; # les apostrophes peuvent servir à quoter...
s/ / /g;
}
while (s/({[^}]*) ([^}]*}\s*_(?:EMAIL|SMILEY|EPSILON|URL|META_TEXTUAL_PONCT|SENT_BOUND|ETR|SPECWORD|TMP_[^ ]+))/$1\_SPACE$2/g) {
} # protect the spaces already present in these named entities (see below)
if ($lang !~ /^(?:fr|en|es|it|nl|de|pt)$/) {
while (s/}( *[^}]*) *(&[^; ]+;|[,;?\!¿¡:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\&])([^ ]+) _(UNSPLIT|REGLUE)/}$1 _$4_$2_PROTECT_ _$4_$3 _$4/g) {}
s/ *(&[^; ]+;|[,;?\!¿¡:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\&]) */ $1 /g; # on isole toutes les ponctuations (et assimilées) sauf le point
if ($lang !~ /^(?:fa|ckb)$/) {
s/ *([\`\˝]) */ $1 /g; # on isole toutes les ponctuations (et assimilées) (˝ est un double accent aigu (hongrois) utilisé parfois comme guillement en l2)
} else {
s/ *([\.]+) */ $1 /g; # on isole toutes les ponctuations (et assimilées)
s/ *( ) */ $1 /g; # on isole toutes les ponctuations (et assimilées)
}
} else {
while (s/}( *[^}]*) *(&[^; ]+;|[,;?\!\¿\¡:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\&\`])([^ _]+) _(UNSPLIT|REGLUE)/}$1 _$4_$2_PROTECT_ _$4_$3 _$4/g) {}
s/ *(&[^; ]+;|[,;?\!\¿\¡:\"“”\)\(\*\#<>\[\]\%\/\\\=\+\«\»\&\`]) */ $1 /g; # on isole toutes les ponctuations (et assimilées) sauf le point
if ($lang eq "fr") {
s/(^| )«  /\1«_NBSP /g; # on attache les espaces insécables à leur ponctuation associée, en les protégeant à cause de l'opération ci-dessous
s/  ([»;:]|[\?\!]+)( |$)/ _NBSP\1\2/g;
s/ *  */   /g; # les espaces insécables restants sont considérés comme des tokens en soi
s/_NBSP/ /g; # on rétablit les espaces insécables associés à des ponctuations
}
}
s/ +([^ ]+) +_PROTECT_/$1_PROTECT_/g;
s/__PLUS__/+/g;
s/ +/ /g;
s/ &quot; / {&quot;} " /g;
s/ &apos;/ {&apos;} '/g;
s/ *(_UNDERSCORE|_ACC_O|_ACC_F) */ $1 /g; # escaped punctuation marks are isolated as well
while (s/({[^}]*) ([^}]*}\s*_(?:META_TEXTUAL_PONCT|SENT_BOUND|ETR|SPECWORD|TMP_[^ ]+))/$1$2/g) {
} # the spaces we just inserted are wrong inside
# these named entities: remove them...
s/_SPACE/ /g; # ...and restore those that were there before
s/_PROTECT_//g;
s/} +_UNSPLIT_/} /g;
if ($lang eq "fr") {
s/($nonl)et\s*\/\s*ou($nonl)/$1 et\/ou $2/g; # special case for et/ou
} elsif ($lang eq "en") {
s/($nonl)and\s*\/\s*or($nonl)/$1 and\/or $2/g; # special case for and/or
}
s/\&\s*(amp|quot|lt|gt)\s*;/\&$1;/g; # special case: XML entities
s/ &lt; -/&lt;-/g; # special case: arrows
s/- &gt; /-&gt; /g; # special case: arrows
s/ +\t/\t/g; s/\t +/\t/g;
s/(\.\.*) +/$1 /g; s/(\.\.*) *$/$1 /g; # a single blank after 2 or more periods
# named entities ("entnom") are tokens
s/\} *(_[A-Za-z_]+)/"} $1".space_unless_is_SPECWORD($1)/ge;
s/ _(UNSPLIT|REGLUE)_ / _$1_/g;
# PERIOD after a lowercase letter
while (s/(} [^ ]+$min)(\.\.*\s*)([\(\[\"“”\)\]\?\!¿¡\'\/\\\_\˝][^{}]*|(?:$l)[^{}]*)?(\{|$)/$1 _UNSPLIT_$2 $3$4/g) {} # avant certaines poncts ou une lettre ou retour-chariot ou {, point+ est un token, mais pas dans les commentaires... !!! cas particulier où on détache la ponct d'un truc qui a déjà un commentaire (expérimental) - peut arriver si on a mis -no_sw sur gl_number.pl mais pas sur segmenteur.pl, par exemple
while (s/($min)(\.\.*\s*)([\(\[\"“”\)\]\?\!¿¡\'\/\\\_\˝][^{}]*|(?:$l)[^{}]*)?(\{|$)/$1 $2 $3$4/g) {} # avant certaines poncts ou une lettre ou retour-chariot ou {, point+ est un token, mais pas dans les commentaires...
s/($min)(\.\.+\s*)([^ ])/$1 $2 $3/g; # before anything else (including a digit), 2 periods are needed for that
s/(\d)\. /\1 . /g; # that said, digit-period-blank makes the period a punctuation mark (note: such a digit is part of something like H1N1 or G8, otherwise it would be _NUM)
s/($maj)\. *$/\1 . /; # ... and at end of line, the final period is a punctuation mark (risky, cf. a sentence ending with S.N.C.F.)
s/($maj$maj)\. ($maj)/\1 . \2/g; # worse, sequences $maj$maj. $maj make the period a punctuation mark (very risky)
s/ +\t/\t/g; s/\t +/\t/g;
# PERIOD after an uppercase letter
s/\b($maj\.)($maj$min)/$1 $2/g; # insert a blank between uppercase-period and uppercase-lowercase
s/($maj{3,})\./$1 \./g; # 3 consecutive uppercase letters then a period -> the period is a punctuation mark
s/\.($maj{3,})/\. $1/g; # a period then 3 consecutive uppercase letters -> the period is a punctuation mark
# PERIOD after a digit
s/(\d)(\.+)([^0-9])/$1 $2 $3/g; # digit period non-digit -> the period is a punctuation mark
# HYPHENS and slashes
s/(\d)(\s*\-\s*)(\d)/$1 $2 $3/g; # a hyphen between 2 digits is a punctuation mark
s/([\(\[\"“”\)\]\,\;\%\˝])\-/$1 -/g; # a hyphen after a punctuation mark other than a period is detached from it
s/\-([\(\[\"“”\)\]\,\;\%\˝])/- $1/g; # a hyphen before a punctuation mark other than a period is detached from it
s/($l)([\/\\\-])\s*($l)/$1$2$3/g; # re-glue hyphens or slashes
s/($l)\s*([\/\\\-])($l)/$1$2$3/g;
s/ +\t/\t/g; s/\t +/\t/g;
s/ *$//g;
s/\n/\#\n/g; # mark line ends in the raw text with a "#"
# final cleanup
s/ +\t/\t/g; s/\t +/\t/g;
# BS: the following lines are dubious
# s/\. +([\-\{])/ . $1/g;
# s/([^\{])\{/$1 \{/g;
if ($lang =~ /^(fr|es|it)$/) {
s/(\s)\'(?=[^ _])/$1\' /g; # once again, since new blanks may have been inserted
}
if ($cut_on_hyphen) {
s/([^ ])-([^ ])/\1 _-_ \2/g;
s/([^ _])-([^ _])/\1 _-_ \2/g;
s/([^ ])-( (?:,|{))/\1 _-_\2/g;
if ($lang eq "fr") {
s/([^ ]t) _-_ (ils?|elles?|on) /$1 {_-_ $2} -$2 /g;
}
}
if ($cut_on_apos && $cut_on_apos_re ne "") {
while (s/ (${cut_on_apos_re})'([^ ])/ \1' \2/g) {}
}
s/(\s[\/\\\.])($l)/$1 $2/g; # changed for hyphens
s/^ *([\/\\\.])($l)/$1 $2/g; # changed for hyphens
s/(\s\.)-/$1 -/g;
s/\.(-\s)/. $1/g;
# } elsif (a$lang eq "ja") {
# print STDERR ">$lang: $_\n";
# $_ = join ("/",TinySegmenter->segment($_));
} elsif ($lang =~ /^(tw|zh)$/ && $has_dict) {
s/(?<=[^\s])([\"“”\*\%\«\»\˝]\s)/ _REGLUE_$1/g; # prudence
s/(\s[\"“”\*\%\«\»\˝])([^\s]+)/$1 _REGLUE_$2/g; # prudence
s/(?<=_REGLUE_)\s+_REGLUE_//g;
s/(?<=_UNSPLIT_)\s+_REGLUE_//g;
my $tokenized = "";
my $unparsed = "";
while (s/^(.)//) {
$firstchar = $1;
$did_something = 0;
if (defined($dict{$firstchar})) {
for $n (0..$#{$dict{$firstchar}}) {
$otherchars = $dict{$firstchar}[$n];
$otherchars = quotemeta($otherchars);
if (s/^$otherchars//) {
if ($unparsed ne "") {$tokenized .= $unparsed." "}
$tokenized .= $firstchar.$otherchars." ";
$unparsed = "";
$did_something = 1;
last;
}
}
}
if ($did_something == 0) {
$unparsed .= $firstchar;
}
}
if ($unparsed ne "") {$tokenized .= $unparsed." "}
$_ = $tokenized;
s/{ +/{/g;
s/ +}/}/g;
s/ ^//;
} elsif ($lang =~ /^(zh|tw)/) {
while (s/({[^}]*) /${1}_SPACE/g) {}
s/([\p{Han}])/ $1 /go;
if ($lang =~ /^zh-/){
s/([\p{Thai}])/ $1 /go;
}
s/([-。…,!:、??「」『』…()“”":,\.!'《》();;【】«»\"\*\++==--——-/\/\\℃])(\1*)/ $1$2 /g;
s/ +/ /go;
while (s/({[^}]*) /$1/g) {}
s/_SPACE/ /g;
} else {
s/(?<=[^\s])([\"“”\*\%\«\»\˝]\s)/ _REGLUE_$1/g; # prudence
s/(\s[\"“”\*\%\«\»\˝])([^\s]+)/$1 _REGLUE_$2/g; # prudence
s/(?<=_REGLUE_)\s+_REGLUE_//g;
s/(?<=_UNSPLIT_)\s+_REGLUE_//g;
}
} else {
# print STDERR "$lang: $_\n";
s/(?<=[^\s])([\"“”\*\%\«\»\˝]\s)/ _REGLUE_$1/g; # prudence
s/(\s[\"“”\*\%\«\»\˝])([^\s]+)/$1 _REGLUE_$2/g; # prudence
s/(?<=_REGLUE_)\s+_REGLUE_//g;
s/(?<=_UNSPLIT_)\s+_REGLUE_//g;
}
s/^/ /;
s/$/ /;
# Special re-gluings
s/(?<=[^\}]) (R \& [Dd]) / {$1} R\&D /go;
if ($lang eq "fr") { # ATTENTION : blanc non convertis en \s
s/(?<=[^\}]) (C ?\. N ?\. R ?\. S ?\.) / {$1} C.N.R.S. /go;
s/(?<=[^\}]) (S ?\. A ?\. R ?\. L ?\.) / {$1} S.A.R.L. /go;
s/(?<=[^\}]) (S ?\. A ?\.) / {$1} S.A. /go;
s/(?<=[^\}]) M +\. / {M .} M. /go;
s/(?<=[^\}]) ([tT][eéEÉ][Ll]) +\. / {\1 .} \1. /go;
s/(?<=[^\}]) \+ \+ / {+ +} ++ /go;
s/(?<=[^\}]) \+ \/ \- / {+ \/ -} +\/- /go;
}
s/(?<=[^\}]) Mr +\. / {Mr .} Mr. /go;
s/(?<=[^\}]) (autocad) / {$1} _SPECWORD_AutoCAD /gi;
if ($lang eq "en") {
if ($expand_contractions) {
s/(?<=[^\}]) ([aA])in't / {\1in't} \1re _UNSPLIT_not /goi;
s/(?<=[^\}]) ([cC]a)n't / {\1n't} \1n _UNSPLIT_not /goi;
s/(?<=[^\}]) ([Ww])on't / {\1on't} \1ill _UNSPLIT_not /goi;
s/(?<=[^\}]) ([^ _][^ ]*)n't / {\1n't} \1 _UNSPLIT_not /goi;
s/(?<=[^\}]) ([Ii])'m / {\1'm} I _UNSPLIT_am /goi;
s/(?<=[^\}]) ([Yy]ou|[Ww]e)'re / {\1're} \1 _UNSPLIT_are /goi;
s/(?<=[^\}]) (I|you|we|they|should|would)'(ve) / {\1've} \1 _UNSPLIT_have /goi;
s/(?<=[^\}]) (I|you|he|she|we|they|there)'(d) / {\1'd} \1 _UNSPLIT_would /goi;
s/(?<=[^\}]) (I|you|he|she|we|they|there)'(ll) / {\1'll} \1 _UNSPLIT_will /goi;
s/(?<=[^\}]) (they)'(re) / {\1're} \1 _UNSPLIT_are /goi;
s/(?<=[^\}]) ([^ ]*[^ s_])'s / {\1's} \1 _UNSPLIT_'s /goi;
s/(?<=[^\}]) ([^ _][^ ]*s)'(?!s |\}.)/ {\1'} \1 _UNSPLIT_'s /goi;
} elsif (0) {
s/(?<=[^\}]) ([aA])in't / {\1in't} \1re _UNSPLIT_n't /goi;
s/(?<=[^\}]) ([cC]a)n't / {\1n't} \1n _UNSPLIT_n't /goi;
s/(?<=[^\}]) ([Ww])on't / {\1on't} \1ill _UNSPLIT_n't /goi;
s/(?<=[^\}]) ([^ ]+)n't / {\1n't} \1 _UNSPLIT_n't /goi;
s/(?<=[^\}]) ([Ii])'m / {\1'm} I _UNSPLIT_'m /goi;
s/(?<=[^\}]) ([Yy]ou|[Ww]e)'re / {\1're} \1 _UNSPLIT_'re /goi;
s/(?<=[^\}]) (I|you|we|they|should|would)'(ve) / {\1've} \1 _UNSPLIT_'ve /goi;
s/(?<=[^\}]) (I|you|he|she|we|they|there)'(d) / {\1'd} \1 _UNSPLIT_'d /goi;
s/(?<=[^\}]) (I|you|he|she|we|they|there)'(ll) / {\1'll} \1 _UNSPLIT_'ll /goi;
s/(?<=[^\}]) (they)'(re) / {\1're} \1 _UNSPLIT_'re /goi;
s/(?<=[^\}]) ([^ ]*[^ s])'s / {\1's} \1 _UNSPLIT_'s /goi;
s/(?<=[^\}]) ([^ ]*s)'(?!s |\}.)/ {\1'} \1 _UNSPLIT_'s /goi;
} else {
s/(?<=[^\}]) ([aA])in't / {\1in't} \1re _UNSPLIT_n't /goi;
s/(?<=[^\}]) ([cC]a)n't / {\1n't} \1n _UNSPLIT_n't /goi;
s/(?<=[^\}]) ([Ww])on't / {\1on't} \1ill _UNSPLIT_n't /goi;
s/(?<=[^\}]) ([^_ ][^ ])n't / {\1n't} \1 _UNSPLIT_n't /goi;
s/(?<=[^\}]) ([Ii])'m / {\1} I 'm /goi;
s/(?<=[^\}]) ([Yy]ou|[Ww]e)'re / \1 're /goi;
s/(?<=[^\}]) (I|you|we|they|should|would)'(ve) / \1 '\2 /goi;
s/(?<=[^\}]) (I|you|he|she|we|they|there)'(d|ll) / \1 '\2 /goi;
s/(?<=[^\}]) (they)'(re) / \1 '\2 /goi;
s/(?<=[^\}]) ([^ ]*[^ s_])'s / \1 's /goi;
s/(?<=[^\}]) ([^ _][^ ]*s)'(?!s |\}.)/ \1 {'} 's /goi;
}
} elsif ($lang eq "fr") {
s/(?<=[^\}]) ([Ss]) ' (\S+)/ {\1 '} \1' \2/goi;
s/(?<=[^\}]) (\') / {$1} " /go;
s/ ([ldnmst]) ([aeéiouy]\S+)/ {\1} \1' \2/goi;
}
s/(?<=[^\}]) \'([^ ]+)\' / {'\1'} ' \1 ' /goi;
if (!$no_sw) {
if ($lang eq "fr") {
# hyphenated suffixes
# doit-elle => doit -elle
# ira-t-elle => ira -t-elle
# ira - t -elle => ira => -t-elle
s/(- ?t ?)?-(ce|elles?|ils?|en|on|je|la|les?|leur|lui|[mt]oi|[vn]ous|tu|y)(?![-a-zA-Z0-9éèêîôûëïüäù])/ $1-$2/go ;
# donne-m'en => donne -m'en
s/(- ?t ?)?-([mlt]\')/ $1-$2/go ;
s/-(née?s?|cl(?:é|ef)s?|ci|là)(?![-a-zA-Z0-9éèêîôûëïüäù])/ _-$1/go ;
# hyphenated prefixes
while (s/(?<=[^\}]) ((?:qu|lorsqu|puisqu|quelqu|quoiqu|[dlnmtcjs])\')(?=[^ ])/ \1 /goi) {}
if ($affixes) {
# WARNING: normally this work is done later, by text2dag. This should only be used when sxpipe is used as a pure tokenizer (i.e., without making a real distinction between token and form)
if ($lang eq "fr") {
# hyphenated suffixes
# hyphenated prefixes
# anti-bush => anti- bush
# franco-canadien => franco- canadien
s/((?:anti|non|o|ex|pro)-)/$1 /igo ;
}
}
}
}
s/(?<=[^\}])(\s+)(\( \.\.\. \))(\s+)/$1\{$2} (...)$3/go;
s/(?<=[^\}])(\s+)(\[ \.\.\. \])(\s+)/$1\{$2} (...)$3/go;
s/(?<=[^\}])(\s+)(\! \?)(\s+)/$1\{$2} !?$3/go;
s/(?<=[^\}])(\s+)(\? \!)(\s+)/$1\{$2} ?!$3/go;
s/(?<=[^\}])(\s+)(\!(?: \!)+)(\s+)/$1\{$2} !!!$3/go;
s/(?<=[^\}])(\s+)(\?(?: \?)+)(\s+)/$1\{$2} ???$3/go;
s/(?<=[^\}])(\s+)(\^\s+\^\s+)([^ \{\}]+)/$1\{$2$3\} $3 /go;
s/(?<=[^\}]) ([^ \{\}\(\[]+) \^ ([^ \{\}]+)/{$1 ^ $2} $1\^$2/go;
s/^\s+([^\s\{\}\(\[]+)(\s\^\s)([^\s\{\}]+)/\{$1$2$3\} $1\^$2/go;
s/((?:_UNDERSCORE\s?)+_UNDERSCORE)([^_]|$)/ {$1} _UNDERSCORE $2/go;
s/((?:_ACC_O\s?)+_ACC_O)(\s|$)/ {$1} _ACC_O$2/go;
s/((?:_ACC_F\s?)+_ACC_F)(\s|$)/ {$1} _ACC_F$2/go;
s/(?<=[^\}]) (turn over) / {$1} turn-over /g;
s/(?<=[^\}]) (check liste?) / {$1} check-list /g;
s/(?<=[^\}]) (i-?phone)(s?) / {$1$2} iPhone$2 /gi;
s/(?<=[^\}]) (i-?pad)(s?) / {$1$2} iPad$2 /gi;
s/(?<=[^\}]) (i-?mac)(s?) / {$1$2} iMac$2 /gi;
if ($lang eq "fr") {
# common abbreviations
s/(?<=[^\}])([- ])([Qq])qfois /$1\{$2qfois} $2elquefois /go;
s/(?<=[^\}])([- ])([Ee])xple /$1\{$2xple} $2xemple /go;
s/(?<=[^\}])([- ])([Bb])cp /$1\{$2cp} $2eaucoup /go;
s/(?<=[^\}])([- ])([Dd])s /$1\{$2s} $2ans /go;
s/(?<=[^\}])([- ])([Mm])(gm?t) /$1\{$2$3} $2anagement /go;
s/(?<=[^\}])([- ])([Nn])s /$1\{$2s} $2ous /go;
s/(?<=[^\}])([- ])([Nn])b /$1\{$2b} $2ombre /go;
s/(?<=[^\}])([- ])([Tt])ps /$1\{$2ps} $2emps /go;
s/(?<=[^\}])([- ])([Tt])(jr?s) /$1\{$2$3} $2oujours /go;
s/(?<=[^\}])([- ])([Qq])que(s?) /$1\{$2ue$3} $2uelque$3 /go;
s/(?<=[^\}])([- ])([Qq])n /$1\{$2n} $2uelqu'un /go;
s/(?<=[^\}])([- ])([Cc])(\.?-?[aà]\.?-?d\.?) /$1\{$2$3} $2'est-à-dire /go;
s/(?<=[^\}])([- ])([Nn])breu(x|ses?) /$1\{$2breu$3} $2ombreu$3 /go;
s/(?<=[^\}])([- ])([^ ]+t)([º°]) /$1\{$2$3} $2ion(s) /go;
s/(?<=[^\}])([- ])([Ss])(nt) /$1\{$2$3} $2ont /go;
s/(?<=[^\}])([- ])(le|du|les|ce) ([wW][Ee]) /$1$2 \{$3} week-end /go;
# common mistakes
s/(?<=[^\}]) (avant gardiste) / {$1} avant-gardiste /g;
s/(?<=[^\}])([- ])à (fortiori|priori|posteriori|contrario) /$1\{à} a $2 /go;
s/(?<=[^\}])([- ])pa /$1\{pa} pas /go;
s/(?<=[^\}])([- ])er /$1\{er} et /go;
s/(?<=[^\}])([- ])([Qq])uant ([^aà])/$1\{$2uant} $2and $3/go;
s/(?<=[^\}])([- ])QUANT ([^AÀ])/$1\{QUANT} QUAND $2/go;
s/(?<=[^\}])([- ])([Cc]) (est|était) /$1\{$2} $2' $3 /go;
s/(?<=[^\}])([- ])(Etats[- ][Uu]nis) /$1\{$2} États-Unis /go;
s/(?<=[^\}])([- ])([Rr])([eé]num[eé]ration) /$1\{$2$3} $2émunération /go;
s/(?<=[^\}])([- ])c (est|ets) /$1\{c} c' \{$2} est /go;
} elsif ($lang eq "en") {
# common abbreviations
s/(?<=[^\}])([- ])(acct(?: ?\.)?) /$1\{$2} account /g;
s/(?<=[^\}])([- ])(addl(?: ?\.)?) /$1\{$2} additional /g;
s/(?<=[^\}])([- ])(amt(?: ?\.)?) /$1\{$2} amount /g;
s/(?<=[^\}])([- ])(approx(?: ?\.)?) /$1\{$2} approximately /g;
s/(?<=[^\}])([- ])(assoc(?: ?\.)?) /$1\{$2} associate /g;
s/(?<=[^\}])([- ])(avg(?: ?\.)?) /$1\{$2} average /g;
s/(?<=[^\}])([- ])(bldg(?: ?\.)?) /$1\{$2} building /g;
s/(?<=[^\}])([- ])(incl(?: ?\.)?) /$1\{$2} including /g;
s/(?<=[^\}])([- ])(intl(?: ?\.)?) /$1\{$2} international /g;
#s/(?<=[^\}])([- ])(jan(?: ?\.)?) /$1\{$2} January /g;
#s/(?<=[^\}])([- ])(feb(?: ?\.)?) /$1\{$2} February /g;
#s/(?<=[^\}])([- ])(apr(?: ?\.)?) /$1\{$2} April /g;
#s/(?<=[^\}])([- ])(aug(?: ?\.)?) /$1\{$2} august /g;
#s/(?<=[^\}])([- ])(sep(?: ?\.)?) /$1\{$2} September /g;
#s/(?<=[^\}])([- ])(oct(?: ?\.)?) /$1\{$2} October /g;
#s/(?<=[^\}])([- ])(nov(?: ?\.)?) /$1\{$2} November /g;
#s/(?<=[^\}])([- ])(dec(?: ?\.)?) /$1\{$2} December /g;
s/(?<=[^\}])([- ])(max(?: ?\.)?) /$1\{$2} maximum /g;
s/(?<=[^\}])([- ])(mfg(?: ?\.)?) /$1\{$2} manufacturing /g;
s/(?<=[^\}])([- ])(mgr(?: ?\.)?) /$1\{$2} manager /g;
s/(?<=[^\}])([- ])(mgt(?: ?\.)?) /$1\{$2} management /g;
s/(?<=[^\}])([- ])(mgmt(?: ?\.)?) /$1\{$2} management /g;
s/(?<=[^\}])([- ])(std(?: ?\.)?) /$1\{$2} standard /g;
s/(?<=[^\}])([- ])(w \/ o) /$1\{$2} without /g;
s/(?<=[^\}])([- ])(dept(?: ?\.)?) /$1\{$2} department /g;
s/(?<=[^\}])([- ])(wk(?: ?\.)?) /$1\{$2} week /g;
s/(?<=[^\}])([- ])(div(?: ?\.)?) /$1\{$2} division /g;
s/(?<=[^\}])([- ])(asst(?: ?\.)?) /$1\{$2} assistant /g;
s/(?<=[^\}])([- ])(av(?: ?\.)?) /$1\{$2} average /g;
s/(?<=[^\}])([- ])(avg(?: ?\.)?) /$1\{$2} average /g;
s/(?<=[^\}])([- ])(co(?: ?\.)?) /$1\{$2} company /g;
s/(?<=[^\}])([- ])(hr(?: ?\.)?) /$1\{$2} hour /g;
s/(?<=[^\}])([- ])(hrs(?: ?\.)?) /$1\{$2} hours /g;
s/(?<=[^\}])([- ])(mo(?: ?\.)?) /$1\{$2} month /g;
#s/(?<=[^\}])([- ])(mon(?: ?\.)?) /$1\{$2} Monday /g;
#s/(?<=[^\}])([- ])(tue(?: ?\.)?) /$1\{$2} Tuesday /g;
#s/(?<=[^\}])([- ])(wed(?: ?\.)?) /$1\{$2} Wednesday /g;
#s/(?<=[^\}])([- ])(thu(?: ?\.)?) /$1\{$2} Thursday /g;
#s/(?<=[^\}])([- ])(fri(?: ?\.)?) /$1\{$2} Friday /g;
#s/(?<=[^\}])([- ])(sun(?: ?\.)?) /$1\{$2} Sunday /g;
s/(?<=[^\}])([- ])(no ?\.) /$1\{$2} number /g;
s/(?<=[^\}])([- ])(yr(?: ?\.)?) /$1\{$2} year /g;
s/(?<=[^\}])([- ])(abt) /$1\{$2} about /g;
s/(?<=[^\}])([- ])(jr(?: ?\.)?) /$1\{$2} junior /g;
s/(?<=[^\}])([- ])(jnr(?: ?\.)?) /$1\{$2} junior /g;
s/(?<=[^\}])([- ])(mo(?: ?\.)?) /$1\{$2} month /g;
s/(?<=[^\}])([- ])(mos(?: ?\.)?) /$1\{$2} months /g;
s/(?<=[^\}])([- ])(sr(?: ?\.)?) /$1\{$2} senior /g;
s/(?<=[^\}])([- ])(co-op) /$1\{$2} cooperative /g;
s/(?<=[^\}])([- ])(co(?: ?\.)?) /$1\{$2} company /g;
s/(?<=[^\}])([- ])(cond(?: ?\.)?) /$1\{$2} condition /g;
s/(?<=[^\}])([- ])(corp(?: ?\.)?) /$1\{$2} corporation /g;
s/(?<=[^\}])([- ])(dba(?: ?\.)?) /$1\{$2} doing _UNSPLIT_business _UNSPLIT_as /g;
s/(?<=[^\}])([- ])(dbl(?: ?\.)?) /$1\{$2} double /g;
s/(?<=[^\}])([- ])(ea(?: ?\.)?) /$1\{$2} each /g;
s/(?<=[^\}])([- ])(inc(?: ?\.)?) /$1\{$2} incorporated /g;
s/(?<=[^\}])([- ])(int'l) /$1\{$2} international /g;
s/(?<=[^\}])([- ])(ltd) /$1\{$2} limited /g;
#s/(?<=[^\}])([- ])(m-f(?: ?\.)?) /$1\{$2} Monday _UNSPLIT_through _UNSPLIT_Friday /g;
s/(?<=[^\}])([- ])(misc(?: ?\.)?) /$1\{$2} miscellaneous /g;
s/(?<=[^\}])([- ])(msg(?: ?\.)?) /$1\{$2} message /g;
#s/(?<=[^\}])([- ])(spd(?: ?\.)?) /$1\{$2} Speed /g;
s/(?<=[^\}])([- ])(w(?: ?\. ?)?r(?: ?\. ?)?t(?: ?\.)?) /$1\{$2} with _UNSPLIT_respect _UNSPLIT_to /g;
s/(?<=[^\}])([- ])(e(?: ?\. ?)?g(?: ?\.)?) /$1\{$2} e.g. /g;
s/(?<=[^\}])([- ])(i(?: ?\. ?)?e(?: ?\.)?) /$1\{$2} i.e. /g;
s/(?<=[^\}])([- ])(ibid(?: ?\.)?) /$1\{$2} ibidem /g;
s/(?<=[^\}])([- ])(pb(?: ?\.)?) /$1\{$2} problem /g;
# common mistakes
s/(?<=[^\}])([- ])(today)(s) /$1\{$2$3} $2 _UNSPLIT_'$3 /goi;
s/(?<=[^\}]) (i) / \{$1} I /go;
s/(?<=[^\}])([- ])([Rr])(enum[ea]ration) /$1\{$2$3} $2muneration /go;
s/(?<=[^\}])([- ])([Aa]t)(le?ast|most|all) /$1\{$2$3} $2 _UNSPLIT_$3 /go;
s/(?<=[^\}])([- ])(wright) /$1\{$2} right /go;
s/(?<=[^\}])([- ])(Wright) /$1\{$2} Right /go;
s/(?<=[^\}])([- ])(Objectie)(s?) /$1\{$2$3} Objective$3 /go;
s/(?<=[^\}])([- ])(do) (note) /$1 $2 \{$3} ___not /go;
s/ ___not (,|that|the|a) / note $1 /go;
s/ ___not / not /go;
} elsif ($lang eq "fa") {
s/(?<=[^\}]) ([^ ]+)_ra / {\1_ra} \1 _UNSPLIT_ra /g;
s/(?<=[^\}]) ([^ ]+[aw])([_ ])y([ymtš]) / {\1\2y\3} \1y\3 /g;
s/(?<=[^\}]) ([^ ]+[ey])([_ ])a([ymtš]) / {\1\2a\3} \1a\3 /g;
s/(?<=[^\}]) ([^ ]+[^ay])([_ ])([ymtš]) / {\1\2\3} \1\3 /g;
s/(?<=[^\}]) ([^ ]+[aw])([_ ])y([mtš]an) / {\1\2y\3} \1y\3 /g;
s/(?<=[^\}]) ([^ ]+[^a])([_ ])([mtš]an) / {\1\2\3} \1\3 /g;
s/(?<=[^\}]) ([^ ]+) (ea) / {\1 \2} \1_\2 /g;
s/(?<=[^\}]) ([^ ]+) (eay[mtš]) / {\1 \2} \1_\2 /g;
s/(?<=[^\}]) ([^ ]+) (eay[mtš]an) / {\1 \2} \1_\2 /g;
s/(?<=[^\}]) (n?my) ([^ {]+) / {\1 \2} \1_\2 /g;
s/(?<=[^\}]) na ([^ {]+) / {na \1} na_\1 /g;
}
# re-glue periods that belong to abbreviations
if ($no_af != 1) {
if ($lang =~ /^(fr|en|kmr|it|de|es|nl|pt)$/) {
s/(?<=[^a-zàâäéêèëîïöôùûüÿçA-ZÀÉÈÊËÂÄÔÖÛÜÇ\_\-])$abrp_re(\s+[^\s])/{$1} get_normalized_pctabr($1).$2/ge;
s/(?<=[^a-zàâäéêèëîïöôùûüÿçA-ZÀÉÈÊËÂÄÔÖÛÜÇ\_\-])$abrp_fin_re(\s|$)/{$1} get_normalized_pctabr($1).$2/ge;
} elsif ($lang !~ /^(ru|uk|bg)$/) {
s/(?<=[^a-zабвгдежзийклмнопрстуфхцчшщэюяыьёA-ZАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЭЮЯЫЬЁ\_\-\s])$abrp_re(\s+[^\s])/{$1} get_normalized_pctabr($1).$2/ge;
s/(?<=[^a-zабвгдежзийклмнопрстуфхцчшщэюяыьёA-ZАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЭЮЯЫЬЁ\_\-\s])$abrp_fin_re(\s|$)/{$1} get_normalized_pctabr($1).$2/ge;
} elsif ($lang !~ /^(ja|zh|tw|th)$/) {
s/(?<=[^a-záäąćčďéęěëíĺľłńňóôöŕřśšťúůüýźżA-ZÁÄĄĆČĎÉĘĚËÍĹŁĽŃŇÓÔÖŔŘŚŠŤÚŮÜÝŹŻ\_\-\s])$abrp_re(\s+[^\s])/{$1} get_normalized_pctabr($1).$2/ge;
s/(?<=[^a-záäąćčďéęěëíĺľłńňóôöŕřśšťúůüýźżA-ZÁÄĄĆČĎÉĘĚËÍĹŁĽŃŇÓÔÖŔŘŚŠŤÚŮÜÝŹŻ\_\-\s])$abrp_fin_re(\s|$)/{$1} get_normalized_pctabr($1).$2/ge;
}
}
if ($lang !~ /^(ja|zh|tw|th)$/) {
# dot-ending abbreviations that are at the end of a sentence
# _UNSPLIT_ has the effect that the tokens (in the comments) associated
# with the final punctuation will be the same as those associated with the preceding form,
# i.e. with the abbreviation:
# echo "adj." | sxpipe gives:
# {<F id="E1F1">adj</F> <F id="E1F2">.</F>} adj. {<F id="E1F1">adj</F> <F id="E1F2">.</F>} .
# it is tok2cc/rebuild_easy_tags.pl that does this work
# we only do this if the abbreviation in question is allowed to end a sentence,
# which is indicated in the lexicon by the fact that it ends with 2 periods (!)
s/(?<=[^\.\s\_])(\.\_FINABR\.*) *$/\. _UNSPLIT_$1/;
# We now have to handle abbreviations recognized inside a named entity (e.g. "{{godz .} godz. 16} _TIME")
if ($no_sw) {
while (s/(\{[^\{\}]*)\{([^\{\}]*)\} [^ ]+/$1$2/g) {
}
} else {
while (s/(\{[^\{\}]*)\{[^\{\}]*\} ([^ ]+)/$1$2/g) {
}
}
s/ */ /g;
s/^ //;
}
# Special detachments
if ($lang eq "pl") {
if ($no_sw) {
s/(^|\s)(przeze|na|za|do|ode|dla|we)(ń)(\s|$)/$1\{$2$3\} $2 \{$2$3\} _$3$4/g;
} else {
s/(^|\s)(przeze|na|za|do|ode|dla|we)(ń)(\s|$)/$1$2 \{$3\} _$3$4/g;
}
}
# SENTENCE SEGMENTATION unless the -no_s option was given
# --------------------------------------------------------
# sentence boundaries are identified in any case
# - with -no_s, they are marked by a space
# - otherwise, there are 2 kinds of them:
# * those marked with #__# are replaced by a newline,
# * those marked with #_# are replaced by $sent_bound,
# which is a newline by default but can be redefined
# with -sb=XXX (often, XXX = _SENT_BOUND)
while (s/({[^}]*) ([^}]*}\s*_(?:EMAIL|SMILEY|EPSILON|URL|META_TEXTUAL_PONCT|SENT_BOUND|ETR|SPECWORD|TMP_[^ ]+))/$1\_SPACE$2/g) {
} # protect these named entities to avoid spurious tokenizations induced by the segmentation (see below)
s/ +\t/\t/g; s/\t +/\t/g;
s/ +/ /g;
s/\t\t+/\t/g;
if ($lang =~ /^(ja|zh|tw)$/) {
s/ ([!?\!\?。。]|) / \1 \#__\#/g;
if ($weak_sbound > 0) {
s/ ([:;:;]|) / \1 \#_\#/g;
}
} elsif ($lang !~ /^(ja|zh|tw|th)$/) {
s/([\.:;\?\!])\s*([\"“”\˝]\s*(?:$maj|[\[_\{])[^\"“”\˝]*[\.:;\?\!]\s*[\"“”\˝])\s*(\s$maj|[\[\{]|$)/$1\#\_\#$2\#\_\#$3/g; # détection de phrases entières entre dbl-quotes
s/(?<=\s)(\.\s*_UNDERSCORE)/{$1} ./go; # ad hoc for mondediplo and its end-of-article underscores (?)
# $special_split = ($split_before_ne) ? qr/[\{\[_]/ : qr/[\[_]/;
$special_split = qr/[\{\[_¿¡]/;
if (!$no_sw) {
s/(?<=[^\.])(\.\.*)\s*(\(\.\.\.\))\s($maj|[\[_\{\.])/ $1\#\_\#$2\#\_\#$3/g;
s/([^\.][0-9\}\s]\.\.*)\s*($initialclass|$special_split)/$1\#\_\#$2/g; # STANDARD CASE
s/($l|[\!\?])(\s*\.\.\.)\s($l|$special_split)/$1$2\#\_\#$3/g;
s/(\.\s+\.\.*)\s*($maj|$special_split)/$1\#\_\#$2/g;
s/\_FINABR\s*($initialclass|$special_split)/ _UNSPLIT_.\#\_\#$1/g;
} else {
s/(?<=\s)(\.\.*)(\s+\[\.\.\.\])(\s+$maj|[\[_\{\.])/$1\#\_\#$2\#\_\#$3/g;
s/([^\.]\s+\.\.*)(\s+$initialclass|$special_split)/$1\#\_\#$2/g; # STANDARD CASE
s/($l|[\!\?])(\s*\.\.\.*)(\s+(?:$l|$special_split))/$1$2\#\_\#$3/g;
s/\_FINABR(\s*$initialclass|$special_split)/ _UNSPLIT_.\#\_\#$1/g;
}
s/(\.\s*\.+)(\s+$initialclass|[\[_\{\-\«¿¡])/$1\#\_\#$2/g; # attention !!!
s/([\?\!]\s*\.*)(\s+$initialclass|[\[_\{\-\«¿¡])/$1\#\_\#$2/g; # attention !!!
s/([\.\?\!]\s*\.\.+)(\s+)/$1\#\_\#$2/g; # attention
s/([\.\?\!,:])(\s+[\-\+\«¿¡])/$1\#\_\#$2/g; # attention
if ($weak_sbound > 0) { # with $weak_sbound, also segment on colons
s/(:\s*\.*)(\s+$initialclass|[\[_\{\-\«¿¡])/$1\#\_\#$2/g; # attention !!!
s/(:\s*\.\.+)(\s+)/$1\#\_\#$2/g; # attention
}
}
s/(?<!TA_TEXTUAL_PONCT|_META_TEXTUAL_GN)(\s+\{[^\}]*\} _META_TEXTUAL)/\#\_\#$1/g; # attention
if ($lang !~ /^(ja|zh|tw|th)$/) {
if ($best_recall) {
s/(,)(\s+[\-\+])/$1\#\_\#$2/g; # attention
}
while (s/^((?:[^\"“”]*[\"“”\˝][^\"“”]*[\"“”\˝])*[^\"“”]*[\.;\?\!])(\s+[\"“”\˝])/$1\#\_\#$2/g) {} # attention
while (s/^([^\"“”]*[\"“”\˝](?:[^\"“”]*[\"“”\˝][^\"“”]*\"“”)*[^\"“”]*[\.;\?\!]\s+[\"“”\˝])\s+/$1\#\_\#/g) {} # attention
if ($weak_sbound > 0) {
s/(\s+);(\s+)/$1;\#\_\#$2/g; # semicolons are sentence boundaries ($sent_bound in the output, which may be a newline)
s/;#_# ([\"“”\˝])#_#/; $1#_#/g;
if ($weak_sbound == 2) {
s/(\s+)\.(\s+)/$1 \.\#\_\#$2/g; # case of languages without capital letters..
}
}
}
s/$/\#\__\#/; # every newline in the source text is a paragraph boundary (newline in the output)
while (s/({[^}]*)(?: |#_+#)([^}]*}\s*_(?:EMAIL|SMILEY|EPSILON|URL|META_TEXTUAL_PONCT|SENT_BOUND|ETR|SPECWORD|TMP_[^ ]+))/$1$2/g) {
}
if ($toksent) { # si on nous demande de segmenter en phrases
if ($lang !~ /^(ja|zh|tw|th)$/) {
# if a sentence boundary was detected, any period+ that precedes it must be isolated
if (!$no_sw) {
s/([^\.\{_])(\.+\s*)\#\_\#/$1 $2\#_\#/g;
} else {
s/(\s\.+)\s*\#\_\#/$1\#_\#/g;
s/([^\.\s\{_])(\.+)\s*\#\_\#/$1 _REGLUE_$2\#_\#/g;
}
}
while (s/(\{[^\}]*)\#\_\#/$1 /g) {
}
# caution: these lines do not (for now) handle parenthesis nesting
# deeper than 1. So if we have "( ( ) #_# )", things go rather badly
if ($lang =~ /^(fa|ckb|ar|he)$/) { # languages written right to left, whose opening and closing brackets are therefore "reversed"
while (s/(\)[^\(]*)\#\_\#/$1 /g) {
}
} else {
while (s/(\([^\)]*)\#\_\#/$1 /g) {
}
}
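# the loops above erase any #_# boundary left inside a brace- or parenthesis-delimited span,
# e.g. "( see #_# above )" becomes "( see above )" once extra spaces are collapsed;
# for the right-to-left languages the scan starts from ")" since the brackets appear swapped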
s/#_+#\s*_(UNSPLIT|REGLUE)/ _$1__SENT_BOUND _$1/g;
# try not to split inside quotations
my @array = split /(?=[\"“”])/, " $_ ";
$_ = "";
for my $i (0..$#array) {
if ($array[$i] =~ s/^([\"“”])//) {$_ .= $1}
if ($i % 2 == 1) {
$array[$i] =~ s/#_#/ $qsent_bound /g;
} else {
$array[$i] =~ s/^#_#,/ $qsent_bound ,/g;
$array[$i] =~ s/:#_#$/: $qsent_bound /g;
}
$_ .= "$array[$i]";
}
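# the split on quote characters alternates between chunks outside quotation marks (even
# indices) and inside them (odd indices); inside a quotation, #_# is rewritten as
# $qsent_bound rather than $sent_bound, presumably so that breaks inside quoted material
# can be treated differently from ordinary sentence breaks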
s/\s*\#\_\#\s*/ $sent_bound /g;
s/\s*\#\__\#\s*/\n/g;
} else {
s/\#\_+\#/ /g;
s/$/\n/;
}
s/\_FINABR//g;
s/ +/ /g;
s/(^|\n) +/\1/g;
s/ +$//;
s/_SPACE/ /g; # restore the spaces that were originally inside named entities
if (/( - .*){8,}/) { # from 8 occurrences onwards (a deliberately tight threshold), assume we are dealing with a list
s/ - /\n- /g;
}
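# e.g. "menu : - starter - main - dessert - ..." becomes "menu :\n- starter\n- main\n- dessert\n- ..."
# (every " - " starts a new line once the 8-occurrence threshold above is reached)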
# output
if ($_!~/^ *$/) { # && $_!~/^-----*$/ && $_!~/^\d+MD\d+$/) {
if (!$no_sw) {
s/(\S){/\1 {/g;
}
print "$_";
} elsif ($keep_blank_lines && /^\s*$/) {
print "\n";
}
}
sub get_normalized_pctabr {
my $s = shift;
$s =~ s/\s+//g;
return $rhs_nospace2rhs{$s};
}
sub space_unless_is_SPECWORD {
my $s = shift;
if ($s =~ /^_SPECWORD/) {return ""}
return " ";
}
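# space_unless_is_SPECWORD() returns an empty string for tokens starting with _SPECWORD and a
# single space otherwise, e.g. space_unless_is_SPECWORD("_SPECWORD_x") -> "" but
# space_unless_is_SPECWORD("mot") -> " "; get_normalized_pctabr() strips all whitespace from
# its argument and looks the result up in %rhs_nospace2rhs, which is presumably filled
# elsewhere in the script from the abbreviation data passed with -af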
from parsing.Taggers import MeltTagger
# from parsing.Taggers.melttagger.tagger import POSTagger, Token, DAGParser, DAGReader
# # references:
# # - http://cs.nyu.edu/grishman/jet/guide/PennPOS.html
# # - http://www.lattice.cnrs.fr/sites/itellier/SEM.html
# class identity_dict(dict):
# def __missing__(self, key):
# return key
# _tag_replacements = identity_dict({
# 'DET': 'DT',
# 'NC': 'NN',
# 'NPP': 'NNP',
# 'ADJ': 'JJ',
# 'PONCT': '.',
# 'ADVWH': 'WRB',
# 'ADV': 'RB',
# 'DETWH': 'WDT',
# 'PROWH': 'WP',
# 'ET': 'FW',
# 'VINF': 'VB',
# 'I': 'UH',
# 'CS': 'IN',
# # 'CLS': '',
# # 'CLR': '',
# # 'CLO': '',
# # 'PRO': '',
# # 'PROREL': '',
# # 'P': '',
# # 'P+D': '',
# # 'P+PRO': '',
# # 'V': '',
# # 'VPR': '',
# # 'VPP': '',
# # 'VS': '',
# # 'VIMP': '',
# # 'PREF': '',
# # 'ADJWH': '',
# })
# import subprocess
# class MeltTagger:
# def __init__(self, language='fr', melt_data_path='./parsing/Taggers/melttagger'):
# path = '%s/%s' % (melt_data_path, language)
# self.pos_tagger = POSTagger()
# self.pos_tagger.load_tag_dictionary('%s/tag_dict.json' % path)
# self.pos_tagger.load_lexicon('%s/lexicon.json' % path)
# self.pos_tagger.load_model('%s' % path)
# self._preprocessing_commands = (
# # ('/usr/local/bin/clean_noisy_characters.sh', ),
# # ('/usr/local/bin/MElt_normalizer.pl', '-nc', '-c', '-d', '/usr/local/share/melt/normalization/%s' % language, '-l', language, ),
# ('/usr/local/share/melt/segmenteur.pl', '-a', '-ca', '-af=/usr/local/share/melt/pctabr', '-p', 'r'),
# )
# self._lemmatization_commands = (
# ('/usr/local/bin/MElt_postprocess.pl', '-npp', '-l', language),
# ('MElt_lemmatizer.pl', '-m', '/usr/local/share/melt/%s' % language),
# )
# def pipe(self, text, commands, encoding='utf8'):
# text = text.encode(encoding)
# # print(text.decode(encoding))
# for command in commands:
# # print(command)
# process = subprocess.Popen(
# command,
# bufsize=0,
# stdin=subprocess.PIPE,
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE,
# )
# text, err = process.communicate(text)
# # print()
# # print(text.decode(encoding))
# if len(err):
# print(err.decode(encoding))
# return text.decode(encoding)
# def tag(self, text, encoding='utf8', lemmatize=True):
# preprocessed = self.pipe(text, self._preprocessing_commands)
# if lemmatize:
# result = ''
# for sentence in preprocessed.split('\n'):
# words = sentence.split(' ')
# tokens = [Token(word) for word in words]
# tagged_tokens = self.pos_tagger.tag_token_sequence(tokens)
# # result += ' '.join(token.__str__() for token in tagged_tokens)
# for token in tagged_tokens:
# if len(token.string):
# result += '%s/%s ' % (token.string, token.label, )
# result += '\n'
# lemmatized = self.pipe(result, self._lemmatization_commands)
# for sentence in lemmatized.split('\n'):
# for token in sentence.split(' '):
# if len(token):
# yield tuple(token.split('/'))
# else:
# for sentence in preprocessed.split('\n'):
# words = sentence.split(' ')
# tokens = [Token(word) for word in words]
# tagged_tokens = self.pos_tagger.tag_token_sequence(tokens)
# for token in tagged_tokens:
# if len(token.string):
# yield (token.string, _tag_replacements[token.label], )
if __name__ == '__main__':
    from time import time
    t0 = time()
    tagger = MeltTagger()
    print(time() - t0)
    print()
    text = """Le vieil hôtel de ville, construit de 1608 à 1610 est le plus ancien bâtiment de la ville de Wiesbaden. Il se dresse sur la place centrale de la vieille ville, la Place du Palais, qui abrite aujourd'hui le Parlement de l'État de Hesse, l'église et l'hôtel de ville.
Il a été construit dans le style Renaissance. On a ajouté, en 1828, un étage de style romantique historié. Sur les bas-reliefs des cinq fenêtres de l'étage, en bois, étaient représentées les vertus de la force, la justice, la charité, de prudence et de modération, alors que la pierre a remplacé par des copies. Le pièces de chêne d'origine peut être visitées aujourd'hui au Musée de Wiesbaden. Aujourd'hui, le bâtiment sert de bureau de la ville de Wiesbaden.
Devant le porche, entre l'hôtel de Ville et l'Ancien hôtel de ville, se trouve la colonne centrale de Nassau, un lion couronné avec bouclier.
Il s'agit de construire progressivement, à partir des données initiales, un sous-graphe dans lequel sont classés les différents sommets par ordre croissant de leur distance minimale au sommet de départ. La distance correspond à la somme des poids des arêtes empruntées.
Au départ, on considère que les distances de chaque sommet au sommet de départ sont infinies. Au cours de chaque itération, on va mettre à jour les distances des sommets reliés par un arc au dernier du sous-graphe (en ajoutant le poids de l'arc à la distance séparant ce dernier sommet du sommet de départ ; si la distance obtenue ainsi est supérieure à celle qui précédait, la distance n'est cependant pas modifiée). Après cette mise à jour, on examine l'ensemble des sommets qui ne font pas partie du sous-graphe, et on choisit celui dont la distance est minimale pour l'ajouter au sous-graphe.
La première étape consiste à mettre de côté le sommet de départ et à lui attribuer une distance de 0. Les sommets qui lui sont adjacents sont mis à jour avec une valeur égale au poids de l'arc qui les relie au sommet de départ (ou à celui de poids le plus faible si plusieurs arcs les relient) et les autres sommets conservent leur distance infinie.
Le plus proche des sommets adjacents est alors ajouté au sous-graphe.
La seconde étape consiste à mettre à jour les distances des sommets adjacents à ce dernier. Encore une fois, on recherche alors le sommet doté de la distance la plus faible. Comme tous les sommets n'avaient plus une valeur infinie, il est donc possible que le sommet choisi ne soit pas un des derniers mis à jour.
On l'ajoute au sous-graphe, puis on continue ainsi à partir du dernier sommet ajouté, jusqu'à épuisement des sommets ou jusqu'à sélection du sommet d'arrivée.
"""
    i = 0
    t0 = time()
    for x in tagger.tag_text(text, lemmatize=True):
        print(x)
        i += 1
    t = time() - t0
    print(t)
    print(t / i)
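# A minimal consumption sketch (not part of the original script): it assumes that
# tag_text() with lemmatize=True yields (form, pos, lemma) tuples, as the commented-out
# legacy tag() method above does; the helper name count_pos is hypothetical.
def count_pos(tagged_tokens):
    """Count how many tokens carry each POS tag."""
    counts = {}
    for token in tagged_tokens:
        # each token is expected to look like ('hôtel', 'NC', 'hôtel')
        pos = token[1] if len(token) > 1 else 'UNK'
        counts[pos] = counts.get(pos, 0) + 1
    return counts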