author     Victor Chahuneau <vchahune@cs.cmu.edu>  2012-07-27 01:16:03 -0400
committer  Victor Chahuneau <vchahune@cs.cmu.edu>  2012-07-27 01:16:03 -0400
commit     8fdc3681fb7551e7faeff9f720102cdd417ba077 (patch)
tree       1129d2b79a3255c249e181141814cb92b52b4d4d /python/cdec
parent     0aac9fd78f1c8b9ba3d91d702f592288075cbbde (diff)
[python] Fork of the suffix-array extractor with surface improvements
Available as the cdec.sa module, with command-line helpers:

    python -m cdec.sa.compile -f ... -e ... -a ... -o sa-out/ -c extract.ini
    python -m cdec.sa.extract -c extract.ini -g grammars-out/ < input.txt > input.sgml

+ renamed cdec.scfg -> cdec.sa
+ Python README
Diffstat (limited to 'python/cdec')
-rw-r--r--  python/cdec/sa/__init__.py                                    4
-rw-r--r--  python/cdec/sa/compile.py                                    94
-rw-r--r--  python/cdec/sa/extract.py                                    32
-rw-r--r--  python/cdec/sa/extractor.py                                  73
-rw-r--r--  python/cdec/sa/features.py (renamed from python/cdec/scfg/features.py)  16
-rw-r--r--  python/cdec/scfg/__init__.py                                  1
-rw-r--r--  python/cdec/scfg/extractor.py                               120
7 files changed, 210 insertions, 130 deletions
diff --git a/python/cdec/sa/__init__.py b/python/cdec/sa/__init__.py
new file mode 100644
index 00000000..ddefa280
--- /dev/null
+++ b/python/cdec/sa/__init__.py
@@ -0,0 +1,4 @@
+from _cdec_sa import sym_tostring, sym_isvar, sym_fromstring,\
+        SuffixArray, DataArray, LCP, Precomputation, Alignment, BiLex,\
+        HieroCachingRuleFactory, Sampler
+from extractor import GrammarExtractor
diff --git a/python/cdec/sa/compile.py b/python/cdec/sa/compile.py
new file mode 100644
index 00000000..061cdab2
--- /dev/null
+++ b/python/cdec/sa/compile.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+import argparse
+import os
+import logging
+import configobj
+import cdec.sa
+
+MAX_PHRASE_LENGTH = 4
+def precompute(f_sa, max_len, max_nt, max_size, min_gap, rank1, rank2):
+    lcp = cdec.sa.LCP(f_sa)
+    stats = sorted(lcp.compute_stats(MAX_PHRASE_LENGTH), reverse=True)
+    precomp = cdec.sa.Precomputation(from_stats=stats,
+            fsarray=f_sa,
+            precompute_rank=rank1,
+            precompute_secondary_rank=rank2,
+            max_length=max_len,
+            max_nonterminals=max_nt,
+            train_max_initial_size=max_size,
+            train_min_gap_size=min_gap)
+    return precomp
+
+def main():
+    logging.basicConfig(level=logging.INFO)
+    logger = logging.getLogger('cdec.sa.compile')
+    parser = argparse.ArgumentParser(description='Compile a corpus into a suffix array.')
+    parser.add_argument('--maxnt', '-n', type=int, default=2,
+                        help='Maximum number of non-terminal symbols')
+    parser.add_argument('--maxlen', '-l', type=int, default=5,
+                        help='Maximum number of terminals')
+    parser.add_argument('--maxsize', '-s', type=int, default=15,
+                        help='Maximum rule span')
+    parser.add_argument('--mingap', '-g', type=int, default=1,
+                        help='Minimum gap size')
+    parser.add_argument('--rank1', '-r1', type=int, default=100,
+                        help='Number of pre-computed frequent patterns')
+    parser.add_argument('--rank2', '-r2', type=int, default=10,
+                        help='Number of pre-computed super-frequent patterns')
+    parser.add_argument('-c', '--config', default='/dev/stdout',
+                        help='Output configuration')
+    parser.add_argument('-o', '--output', required=True,
+                        help='Output path')
+    parser.add_argument('-f', '--source', required=True,
+                        help='Source language corpus')
+    parser.add_argument('-e', '--target', required=True,
+                        help='Target language corpus')
+    parser.add_argument('-a', '--alignment', required=True,
+                        help='Bitext word alignment')
+    args = parser.parse_args()
+
+    param_names = ("max_len", "max_nt", "max_size", "min_gap", "rank1", "rank2")
+    params = (args.maxlen, args.maxnt, args.maxsize, args.mingap, args.rank1, args.rank2)
+
+    if not os.path.exists(args.output):
+        os.mkdir(args.output)
+
+    f_sa_bin = os.path.join(args.output, 'f.sa.bin')
+    e_bin = os.path.join(args.output, 'e.bin')
+    precomp_file = 'precomp.{0}.{1}.{2}.{3}.{4}.{5}.bin'.format(*params)
+    precomp_bin = os.path.join(args.output, precomp_file)
+    a_bin = os.path.join(args.output, 'a.bin')
+    lex_bin = os.path.join(args.output, 'lex.bin')
+
+    logger.info('Compiling source suffix array')
+    f_sa = cdec.sa.SuffixArray(from_text=args.source)
+    f_sa.write_binary(f_sa_bin)
+
+    logger.info('Compiling target data array')
+    e = cdec.sa.DataArray(from_text=args.target)
+    e.write_binary(e_bin)
+
+    logger.info('Precomputing frequent phrases')
+    precompute(f_sa, *params).write_binary(precomp_bin)
+
+    logger.info('Compiling alignment')
+    a = cdec.sa.Alignment(from_text=args.alignment)
+    a.write_binary(a_bin)
+
+    logger.info('Compiling bilexical dictionary')
+    lex = cdec.sa.BiLex(from_data=True, alignment=a, earray=e, fsarray=f_sa)
+    lex.write_binary(lex_bin)
+
+    # Write configuration
+    config = configobj.ConfigObj(args.config, unrepr=True)
+    config['f_sa_file'] = f_sa_bin
+    config['e_file'] = e_bin
+    config['a_file'] = a_bin
+    config['lex_file'] = lex_bin
+    config['precompute_file'] = precomp_bin
+    for name, value in zip(param_names, params):
+        config[name] = value
+    config.write()
+
+if __name__ == '__main__':
+    main()
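
compile.py writes the five compiled binaries and then records their paths and build parameters in a single configuration file. A minimal sketch of reading such a file back with configobj; the name 'extract.ini' is an assumed example taken from the commit message, not something the script fixes:

    import configobj

    # Load the configuration written by `python -m cdec.sa.compile ... -c extract.ini`
    config = configobj.ConfigObj('extract.ini', unrepr=True)
    for key in ('f_sa_file', 'e_file', 'a_file', 'lex_file', 'precompute_file'):
        print('{0} = {1}'.format(key, config[key]))  # paths to the compiled binaries
    print('rank1 = {0}'.format(config['rank1']))     # build parameters are stored too
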
diff --git a/python/cdec/sa/extract.py b/python/cdec/sa/extract.py
new file mode 100644
index 00000000..c6da5e9d
--- /dev/null
+++ b/python/cdec/sa/extract.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+import sys
+import os
+import argparse
+import logging
+import configobj
+import cdec.sa
+
+def main():
+    logging.basicConfig(level=logging.INFO)
+    parser = argparse.ArgumentParser(description='Extract grammars from a compiled corpus.')
+    parser.add_argument('-c', '--config', required=True,
+                        help='Extractor configuration')
+    parser.add_argument('-g', '--grammars', required=True,
+                        help='Grammar output path')
+    args = parser.parse_args()
+
+    if not os.path.exists(args.grammars):
+        os.mkdir(args.grammars)
+
+    extractor = cdec.sa.GrammarExtractor(configobj.ConfigObj(args.config, unrepr=True))
+    for i, sentence in enumerate(sys.stdin):
+        sentence = sentence[:-1]
+        grammar_file = os.path.join(args.grammars, 'grammar.{0}'.format(i))
+        with open(grammar_file, 'w') as output:
+            for rule in extractor.grammar(sentence):
+                output.write(str(rule)+'\n')
+        grammar_file = os.path.abspath(grammar_file)
+        print('<seg grammar="{0}">{1}</seg>'.format(grammar_file, sentence))
+
+if __name__ == '__main__':
+    main()
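
extract.py is a thin CLI around the GrammarExtractor API, so the same per-sentence loop can be driven directly from Python. A minimal sketch, assuming a configuration file named extract.ini produced by cdec.sa.compile; the input sentence is invented:

    import configobj
    import cdec.sa

    # Build the extractor from a compiled-corpus configuration (assumed name).
    extractor = cdec.sa.GrammarExtractor(configobj.ConfigObj('extract.ini', unrepr=True))
    # Print the per-sentence grammar instead of writing it to a file.
    for rule in extractor.grammar('el gato negro .'):
        print(str(rule))
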
diff --git a/python/cdec/sa/extractor.py b/python/cdec/sa/extractor.py
new file mode 100644
index 00000000..c97b3c6f
--- /dev/null
+++ b/python/cdec/sa/extractor.py
@@ -0,0 +1,73 @@
+from itertools import chain
+from cdec.sa.features import EgivenFCoherent, SampleCountF, CountEF,\
+        MaxLexEgivenF, MaxLexFgivenE, IsSingletonF, IsSingletonFE
+import cdec.sa
+
+# maximum span of a grammar rule in TEST DATA
+MAX_INITIAL_SIZE = 15
+
+class GrammarExtractor:
+    def __init__(self, config):
+        # TODO if str, read config
+        alignment = cdec.sa.Alignment(from_binary=config['a_file'])
+        self.factory = cdec.sa.HieroCachingRuleFactory(
+                # compiled alignment object (REQUIRED)
+                alignment,
+                # name of generic nonterminal used by Hiero
+                category="[X]",
+                # maximum number of contiguous chunks of terminal symbols in RHS of a rule
+                max_chunks=config['max_nt']+1,
+                # maximum span of a grammar rule in TEST DATA
+                max_initial_size=MAX_INITIAL_SIZE,
+                # maximum number of symbols (both T and NT) allowed in a rule
+                max_length=config['max_len'],
+                # maximum number of nonterminals allowed in a rule (set >2 at your own risk)
+                max_nonterminals=config['max_nt'],
+                # maximum number of contiguous chunks of terminal symbols
+                # in target-side RHS of a rule
+                max_target_chunks=config['max_nt']+1,
+                # maximum number of target-side symbols (both T and NT) allowed in a rule
+                max_target_length=MAX_INITIAL_SIZE,
+                # minimum span of a nonterminal in the RHS of a rule in TEST DATA
+                min_gap_size=1,
+                # filename of file containing precomputed collocations
+                precompute_file=config['precompute_file'],
+                # maximum frequency rank of patterns used to compute triples (< 20)
+                precompute_secondary_rank=config['rank2'],
+                # maximum frequency rank of patterns used to compute collocations (< 300)
+                precompute_rank=config['rank1'],
+                # require extracted rules to have at least one aligned word
+                require_aligned_terminal=True,
+                # require each contiguous chunk of extracted rules
+                # to have at least one aligned word
+                require_aligned_chunks=False,
+                # maximum span of a grammar rule extracted from TRAINING DATA
+                train_max_initial_size=config['max_size'],
+                # minimum span of an RHS nonterminal in a rule extracted from TRAINING DATA
+                train_min_gap_size=config['min_gap'],
+                # True if phrases should be tight, False otherwise
+                # (False seems to give better results but is slower)
+                tight_phrases=True,
+                )
+
+        # lexical weighting tables
+        tt = cdec.sa.BiLex(from_binary=config['lex_file'])
+
+        self.models = (EgivenFCoherent, SampleCountF, CountEF,
+                MaxLexFgivenE(tt), MaxLexEgivenF(tt), IsSingletonF, IsSingletonFE)
+
+        fsarray = cdec.sa.SuffixArray(from_binary=config['f_sa_file'])
+        edarray = cdec.sa.DataArray(from_binary=config['e_file'])
+
+        # lower=faster, higher=better; improvements level off above 200-300 range,
+        # -1 = don't sample, use all data (VERY SLOW!)
+        sampler = cdec.sa.Sampler(300, fsarray)
+
+        self.factory.configure(fsarray, edarray, sampler)
+
+    def grammar(self, sentence):
+        if isinstance(sentence, unicode):
+            sentence = sentence.encode('utf8')
+        cnet = chain(('<s>',), sentence.split(), ('</s>',))
+        cnet = (cdec.sa.sym_fromstring(word, terminal=True) for word in cnet)
+        cnet = tuple(((word, None, 1), ) for word in cnet)
+        return self.factory.input(cnet, self.models)
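
grammar() encodes the input as a trivial confusion network: the sentence is wrapped in <s>...</s> markers and each word becomes a single-arc column (symbol, None, 1). A sketch of that encoding in isolation, with an invented sample sentence:

    from itertools import chain
    import cdec.sa

    # Mirror the encoding grammar() builds before calling factory.input().
    words = chain(('<s>',), 'el gato'.split(), ('</s>',))
    syms = (cdec.sa.sym_fromstring(w, terminal=True) for w in words)
    cnet = tuple(((s, None, 1),) for s in syms)  # one single-arc column per word
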
diff --git a/python/cdec/scfg/features.py b/python/cdec/sa/features.py
index 6419cdd8..8d35d8e6 100644
--- a/python/cdec/scfg/features.py
+++ b/python/cdec/sa/features.py
@@ -1,10 +1,6 @@
from __future__ import division
import math
-import sym
-
-def contextless(feature):
-    feature.compute_contextless_score = feature
-    return feature
+import cdec.sa
MAXSCORE = 99
@@ -26,8 +22,9 @@ def CoherenceProb(fphrase, ephrase, paircount, fcount, fsample_count):
def MaxLexEgivenF(ttable):
    def feature(fphrase, ephrase, paircount, fcount, fsample_count):
-        fwords = [sym.tostring(w) for w in fphrase if not sym.isvar(w)] + ['NULL']
-        ewords = (sym.tostring(w) for w in ephrase if not sym.isvar(w))
+        fwords = [cdec.sa.sym_tostring(w) for w in fphrase if not cdec.sa.sym_isvar(w)]
+        fwords.append('NULL')
+        ewords = (cdec.sa.sym_tostring(w) for w in ephrase if not cdec.sa.sym_isvar(w))
        def score():
            for e in ewords:
                maxScore = max(ttable.get_score(f, e, 0) for f in fwords)
@@ -37,8 +34,9 @@ def MaxLexEgivenF(ttable):
def MaxLexFgivenE(ttable):
    def feature(fphrase, ephrase, paircount, fcount, fsample_count):
-        fwords = (sym.tostring(w) for w in fphrase if not sym.isvar(w))
-        ewords = [sym.tostring(w) for w in ephrase if not sym.isvar(w)] + ['NULL']
+        fwords = (cdec.sa.sym_tostring(w) for w in fphrase if not cdec.sa.sym_isvar(w))
+        ewords = [cdec.sa.sym_tostring(w) for w in ephrase if not cdec.sa.sym_isvar(w)]
+        ewords.append('NULL')
        def score():
            for f in fwords:
                maxScore = max(ttable.get_score(f, e, 1) for e in ewords)
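
Every feature shares the signature (fphrase, ephrase, paircount, fcount, fsample_count), and the models tuple in extractor.py is simply a collection of such callables. A sketch of a user-defined feature in the same style; EgivenF is a hypothetical name, not part of this commit:

    from __future__ import division
    import math

    # Hypothetical feature: -log10 p(e|f) estimated from the sampled counts.
    def EgivenF(fphrase, ephrase, paircount, fcount, fsample_count):
        return -math.log10(paircount / fcount)
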
diff --git a/python/cdec/scfg/__init__.py b/python/cdec/scfg/__init__.py
deleted file mode 100644
index 6eb2f88f..00000000
--- a/python/cdec/scfg/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from extractor import GrammarExtractor
diff --git a/python/cdec/scfg/extractor.py b/python/cdec/scfg/extractor.py
deleted file mode 100644
index 1dfa2421..00000000
--- a/python/cdec/scfg/extractor.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import sys, os
-import re
-import StringIO
-from itertools import chain
-
-import clex
-import rulefactory
-import calignment
-import csuf
-import cdat
-import sym
-import log
-
-from features import EgivenFCoherent, SampleCountF, CountEF,\
-        MaxLexEgivenF, MaxLexFgivenE, IsSingletonF, IsSingletonFE
-from features import contextless
-
-log.level = -1
-
-class Output(StringIO.StringIO):
-    def close(self):
-        pass
-
-    def __str__(self):
-        return self.getvalue()
-
-def get_cn(sentence):
-    sentence = chain(('<s>',), sentence.split(), ('</s>',))
-    sentence = (sym.fromstring(word, terminal=True) for word in sentence)
-    return tuple(((word, None, 1), ) for word in sentence)
-
-class PhonyGrammar:
-    def add(self, thing):
-        pass
-
-class GrammarExtractor:
-    def __init__(self, cfg):
-        if isinstance(cfg, dict):
-            config = cfg
-        elif isinstance(cfg, str):
-            cfg_file = os.path.basename(cfg)
-            if not re.match(r'^\w+\.py$', cfg_file):
-                raise ValueError('Config must be a *.py file')
-            sys.path.append(os.path.dirname(cfg))
-            config = __import__(cfg_file.replace('.py', '')).__dict__
-            sys.path.pop()
-        alignment = calignment.Alignment(config['a_file'], from_binary=True)
-        self.factory = rulefactory.HieroCachingRuleFactory(
-                # compiled alignment object (REQUIRED)
-                alignment=alignment,
-                # name of generic nonterminal used by Hiero
-                category="[X]",
-                # do not change for extraction
-                grammar=PhonyGrammar(), # TODO: set to None?
-                # maximum number of contiguous chunks of terminal symbols in RHS of a rule. If None, defaults to max_nonterminals+1
-                max_chunks=None,
-                # maximum span of a grammar rule in TEST DATA
-                max_initial_size=15,
-                # maximum number of symbols (both T and NT) allowed in a rule
-                max_length=config['max_len'],
-                # maximum number of nonterminals allowed in a rule (set >2 at your own risk)
-                max_nonterminals=config['max_nt'],
-                # maximum number of contiguous chunks of terminal symbols in target-side RHS of a rule. If None, defaults to max_nonterminals+1
-                max_target_chunks=None,
-                # maximum number of target side symbols (both T and NT) allowed in a rule. If None, defaults to max_initial_size
-                max_target_length=None,
-                # minimum span of a nonterminal in the RHS of a rule in TEST DATA
-                min_gap_size=1,
-                # filename of file containing precomputed collocations
-                precompute_file=config['precompute_file'],
-                # maximum frequency rank of patterns used to compute triples (don't set higher than 20).
-                precompute_secondary_rank=config['rank2'],
-                # maximum frequency rank of patterns used to compute collocations (no need to set higher than maybe 200-300)
-                precompute_rank=config['rank1'],
-                # require extracted rules to have at least one aligned word
-                require_aligned_terminal=True,
-                # require each contiguous chunk of extracted rules to have at least one aligned word
-                require_aligned_chunks=False,
-                # generate a complete grammar for each input sentence
-                per_sentence_grammar=True,
-                # maximum span of a grammar rule extracted from TRAINING DATA
-                train_max_initial_size=config['max_size'],
-                # minimum span of an RHS nonterminal in a rule extracted from TRAINING DATA
-                train_min_gap_size=config['min_gap'],
-                # True if phrases should be tight, False otherwise (False seems to give better results but is slower)
-                tight_phrases=True,
-                )
-        self.fsarray = csuf.SuffixArray(config['f_sa_file'], from_binary=True)
-        self.edarray = cdat.DataArray(config['e_file'], from_binary=True)
-
-        self.factory.registerContext(self)
-
-        # lower=faster, higher=better; improvements level off above 200-300 range, -1 = don't sample, use all data (VERY SLOW!)
-        self.sampler = rulefactory.Sampler(300)
-        self.sampler.registerContext(self)
-
-        # lexical weighting tables
-        tt = clex.CLex(config['lex_file'], from_binary=True)
-
-        self.models = (EgivenFCoherent, SampleCountF, CountEF,
-                MaxLexFgivenE(tt), MaxLexEgivenF(tt), IsSingletonF, IsSingletonFE)
-        self.models = tuple(contextless(feature) for feature in self.models)
-
-    def grammar(self, sentence):
-        if isinstance(sentence, unicode):
-            sentence = sentence.encode('utf8')
-        out = Output()
-        cn = get_cn(sentence)
-        self.factory.input(cn, output=out)
-        return str(out)
-
-def main(config):
-    extractor = GrammarExtractor(config)
-    sys.stdout.write(extractor.grammar(next(sys.stdin)))
-
-if __name__ == '__main__':
-    if len(sys.argv) != 2 or not sys.argv[1].endswith('.py'):
-        sys.stderr.write('Usage: %s config.py\n' % sys.argv[0])
-        sys.exit(1)
-    main(*sys.argv[1:])