From ee681f3d0337dc3d93c25de373a50fbda69252fe Mon Sep 17 00:00:00 2001
From: Chris Dyer
Date: Mon, 21 Jan 2013 17:28:19 -0500
Subject: tokenizer support for utf8 patterns

---
 corpus/support/token_patterns | 1 +
 corpus/support/tokenizer.pl   | 2 ++
 2 files changed, 3 insertions(+)

(limited to 'corpus/support')

diff --git a/corpus/support/token_patterns b/corpus/support/token_patterns
index 8e69432b..b25ac6de 100644
--- a/corpus/support/token_patterns
+++ b/corpus/support/token_patterns
@@ -1,3 +1,4 @@
 /^(al|el|ul|e)\-[a-z]+$/
+/^((а|А)(ль|ш)|уль)-\p{Cyrillic}+$/
 /^(\d|\d\d|\d\d\d)\.$/
 
diff --git a/corpus/support/tokenizer.pl b/corpus/support/tokenizer.pl
index b5190858..0350a894 100755
--- a/corpus/support/tokenizer.pl
+++ b/corpus/support/tokenizer.pl
@@ -73,6 +73,7 @@ my $dict_file = "$workdir/token_list";
 my $word_patt_file = "$workdir/token_patterns";
 
 open(my $dict_fp, "$dict_file") or die;
+binmode($dict_fp, ":utf8");
 
 # read in the list of words that should not be segmented,
 ## e.g.,"I.B.M.", co-operation.
@@ -89,6 +90,7 @@ while(<$dict_fp>){
 }
 
 open(my $patt_fp, "$word_patt_file") or die;
+binmode($patt_fp, ":utf8");
 my @word_patts = ();
 my $word_patt_num = 0;
 while(<$patt_fp>){
--
cgit v1.2.3
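
Illustrative note (not part of the patch): the change works because the ":utf8" layer decodes the pattern file into character strings, so the new /\p{Cyrillic}/ pattern matches Cyrillic prefixes like "аль-" rather than raw bytes. The sketch below is a minimal standalone approximation of that behavior; the file name "token_patterns", the slash-stripping regex, and the sample token "аль-Джазира" are assumptions for illustration and do not reproduce tokenizer.pl's actual parsing.

    #!/usr/bin/perl -w
    use strict;
    use utf8;                      # this script contains UTF-8 literals
    binmode(STDOUT, ":utf8");

    # Hypothetical pattern file; the real script reads "$workdir/token_patterns".
    my $patt_file = "token_patterns";
    open(my $patt_fp, "<", $patt_file) or die "cannot open $patt_file: $!";
    binmode($patt_fp, ":utf8");    # decode bytes as UTF-8, as the patch does

    my @word_patts = ();
    while (<$patt_fp>) {
        chomp;
        next if /^\s*$/;
        # patterns are stored as /.../ in the file; strip the surrounding slashes
        push @word_patts, $1 if m{^/(.+)/$};
    }
    close($patt_fp);

    # With both pattern and token decoded, \p{Cyrillic} matches as expected.
    my $token = "аль-Джазира";
    foreach my $patt (@word_patts) {
        print "matched: $patt\n" if $token =~ /$patt/;
    }

Without the binmode calls, the pattern file would be read as raw bytes and the Cyrillic pattern would fail to match decoded input.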