author     Patrick Simianer <simianer@cl.uni-heidelberg.de>   2012-06-13 14:42:07 +0200
committer  Patrick Simianer <simianer@cl.uni-heidelberg.de>   2012-06-13 14:42:07 +0200
commit     e6d3c25191873ca0cf99db8e89702ed91d65277c (patch)
tree       b0697ece6f5e4a8229915758c68750793a23f776 /python/cdec
parent     62c805c90c5347b844f92574e240db5c65578e12 (diff)
parent     3acdf1e4b37637d6df86a7b54fb0f1b0464c172b (diff)
Merge remote-tracking branch 'upstream/master'
Diffstat (limited to 'python/cdec')
-rw-r--r--  python/cdec/__init__.py         1
-rw-r--r--  python/cdec/scfg/__init__.py    1
-rw-r--r--  python/cdec/scfg/extractor.py   112
-rw-r--r--  python/cdec/scfg/features.py    62
4 files changed, 176 insertions, 0 deletions
diff --git a/python/cdec/__init__.py b/python/cdec/__init__.py
new file mode 100644
index 00000000..c821f860
--- /dev/null
+++ b/python/cdec/__init__.py
@@ -0,0 +1 @@
+from _cdec import Decoder, Hypergraph, Lattice
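The package root does nothing more than re-export the public classes of the compiled _cdec extension, so client code can depend on the cdec package rather than on the build artifact directly. A minimal sketch of the equivalence (assuming the extension has been built and is importable):

    # Both forms resolve to the same extension classes; the first is the
    # intended public spelling after this commit.
    from cdec import Decoder, Hypergraph, Lattice
    from _cdec import Decoder as _Decoder  # the private module being wrapped
    assert Decoder is _Decoder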
diff --git a/python/cdec/scfg/__init__.py b/python/cdec/scfg/__init__.py
new file mode 100644
index 00000000..6eb2f88f
--- /dev/null
+++ b/python/cdec/scfg/__init__.py
@@ -0,0 +1 @@
+from extractor import GrammarExtractor
diff --git a/python/cdec/scfg/extractor.py b/python/cdec/scfg/extractor.py
new file mode 100644
index 00000000..9f1e1137
--- /dev/null
+++ b/python/cdec/scfg/extractor.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python
+import sys, os
+import StringIO
+
+import clex
+import rulefactory
+import calignment
+import csuf
+import cdat
+import sym
+import log
+
+log.level = -1
+
+from features import EgivenFCoherent, SampleCountF, CountEF,\
+ MaxLexEgivenF, MaxLexFgivenE, IsSingletonF, IsSingletonFE
+from features import contextless
+
+class Output(StringIO.StringIO):
+ def close(self):
+        pass # ignore close() so getvalue() remains available afterwards
+
+ def __str__(self):
+ return self.getvalue()
+
+from itertools import chain
+
+def get_cn(sentence): # wrap the sentence, with <s>/</s> markers, as a trivial confusion network
+ sentence = chain(('<s>',), sentence.split(), ('</s>',))
+ sentence = (sym.fromstring(word, terminal=True) for word in sentence)
+ return tuple(((word, None, 1), ) for word in sentence)
+
+class PhonyGrammar: # stand-in grammar whose add() silently discards rules
+ def add(self, thing):
+ pass
+
+class GrammarExtractor:
+ def __init__(self, config):
+ alignment = calignment.Alignment(config['a_file'], from_binary=True)
+ self.factory = rulefactory.HieroCachingRuleFactory(
+ # compiled alignment object (REQUIRED)
+ alignment=alignment,
+ # name of generic nonterminal used by Hiero
+ category="[X]",
+ # do not change for extraction
+ grammar=PhonyGrammar(), # TODO: set to None?
+            # maximum number of contiguous chunks of terminal symbols in the source-side RHS of a rule. If None, defaults to max_nonterminals+1
+ max_chunks=None,
+ # maximum span of a grammar rule in TEST DATA
+ max_initial_size=15,
+ # maximum number of symbols (both T and NT) allowed in a rule
+ max_length=config['max_len'],
+ # maximum number of nonterminals allowed in a rule (set >2 at your own risk)
+ max_nonterminals=config['max_nt'],
+ # maximum number of contiguous chunks of terminal symbols in target-side RHS of a rule. If None, defaults to max_nonterminals+1
+ max_target_chunks=None,
+ # maximum number of target side symbols (both T and NT) allowed in a rule. If None, defaults to max_initial_size
+ max_target_length=None,
+ # minimum span of a nonterminal in the RHS of a rule in TEST DATA
+ min_gap_size=1,
+ # filename of file containing precomputed collocations
+ precompute_file=config['precompute_file'],
+ # maximum frequency rank of patterns used to compute triples (don't set higher than 20).
+ precompute_secondary_rank=config['rank2'],
+ # maximum frequency rank of patterns used to compute collocations (no need to set higher than maybe 200-300)
+ precompute_rank=config['rank1'],
+ # require extracted rules to have at least one aligned word
+ require_aligned_terminal=True,
+ # require each contiguous chunk of extracted rules to have at least one aligned word
+ require_aligned_chunks=False,
+ # generate a complete grammar for each input sentence
+ per_sentence_grammar=True,
+ # maximum span of a grammar rule extracted from TRAINING DATA
+ train_max_initial_size=config['max_size'],
+ # minimum span of an RHS nonterminal in a rule extracted from TRAINING DATA
+ train_min_gap_size=config['min_gap'],
+ # True if phrases should be tight, False otherwise (False seems to give better results but is slower)
+ tight_phrases=True,
+ )
+ self.fsarray = csuf.SuffixArray(config['f_sa_file'], from_binary=True)
+ self.edarray = cdat.DataArray(config['e_file'], from_binary=True)
+
+ self.factory.registerContext(self)
+
+ # lower=faster, higher=better; improvements level off above 200-300 range, -1 = don't sample, use all data (VERY SLOW!)
+ self.sampler = rulefactory.Sampler(300)
+ self.sampler.registerContext(self)
+
+ # lexical weighting tables
+ tt = clex.CLex(config['lex_file'], from_binary=True)
+
+ self.models = (EgivenFCoherent, SampleCountF, CountEF,
+ MaxLexFgivenE(tt), MaxLexEgivenF(tt), IsSingletonF, IsSingletonFE)
+ self.models = tuple(contextless(feature) for feature in self.models)
+
+ def grammar(self, sentence):
+ out = Output()
+ cn = get_cn(sentence)
+ self.factory.input_file(cn, out)
+ return str(out)
+
+def main(config):
+ sys.path.append(os.path.dirname(config))
+ module = __import__(os.path.basename(config).replace('.py', ''))
+ extractor = GrammarExtractor(module.__dict__)
+ print extractor.grammar(next(sys.stdin))
+
+if __name__ == '__main__':
+ if len(sys.argv) != 2 or not sys.argv[1].endswith('.py'):
+ sys.stderr.write('Usage: %s config.py\n' % sys.argv[0])
+ sys.exit(1)
+ main(*sys.argv[1:])
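Since main() imports its argument as a Python module and passes the module namespace to GrammarExtractor, a configuration is just a module that binds the keys read in __init__. A minimal sketch of such a module, with hypothetical placeholder paths (the binary files would come from the sa-extract compilation tools):

    # config.py -- hypothetical extractor configuration; all paths are placeholders.
    a_file          = 'training.a.bin'           # compiled alignment (calignment.Alignment)
    f_sa_file       = 'training.f.sa.bin'        # source-side suffix array (csuf.SuffixArray)
    e_file          = 'training.e.bin'           # target-side data array (cdat.DataArray)
    lex_file        = 'training.lex.bin'         # lexical weighting tables (clex.CLex)
    precompute_file = 'training.precompute.bin'  # precomputed collocations
    max_len  = 5    # max symbols (T and NT) per rule
    max_nt   = 2    # max nonterminals per rule
    max_size = 10   # max rule span in training data
    min_gap  = 1    # min nonterminal span in training data
    rank1    = 100  # precompute_rank
    rank2    = 10   # precompute_secondary_rank

Invoked as `python extractor.py config.py`, the script reads a single sentence from stdin and prints its per-sentence grammar.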
diff --git a/python/cdec/scfg/features.py b/python/cdec/scfg/features.py
new file mode 100644
index 00000000..6419cdd8
--- /dev/null
+++ b/python/cdec/scfg/features.py
@@ -0,0 +1,62 @@
+from __future__ import division
+import math
+import sym
+
+def contextless(feature):
+    feature.compute_contextless_score = feature # the function itself serves as its context-independent scorer
+ return feature
+
+MAXSCORE = 99
+
+def EgivenF(fphrase, ephrase, paircount, fcount, fsample_count): # p(e|f)
+ return -math.log10(paircount/fcount)
+
+def CountEF(fphrase, ephrase, paircount, fcount, fsample_count):
+ return math.log10(1 + paircount)
+
+def SampleCountF(fphrase, ephrase, paircount, fcount, fsample_count):
+ return math.log10(1 + fsample_count)
+
+def EgivenFCoherent(fphrase, ephrase, paircount, fcount, fsample_count):
+ prob = paircount/fsample_count
+ return -math.log10(prob) if prob > 0 else MAXSCORE
+
+def CoherenceProb(fphrase, ephrase, paircount, fcount, fsample_count):
+ return -math.log10(fcount/fsample_count)
+
+def MaxLexEgivenF(ttable):
+ def feature(fphrase, ephrase, paircount, fcount, fsample_count):
+ fwords = [sym.tostring(w) for w in fphrase if not sym.isvar(w)] + ['NULL']
+ ewords = (sym.tostring(w) for w in ephrase if not sym.isvar(w))
+ def score():
+ for e in ewords:
+ maxScore = max(ttable.get_score(f, e, 0) for f in fwords)
+ yield -math.log10(maxScore) if maxScore > 0 else MAXSCORE
+ return sum(score())
+ return feature
+
+def MaxLexFgivenE(ttable):
+ def feature(fphrase, ephrase, paircount, fcount, fsample_count):
+ fwords = (sym.tostring(w) for w in fphrase if not sym.isvar(w))
+ ewords = [sym.tostring(w) for w in ephrase if not sym.isvar(w)] + ['NULL']
+ def score():
+ for f in fwords:
+ maxScore = max(ttable.get_score(f, e, 1) for e in ewords)
+ yield -math.log10(maxScore) if maxScore > 0 else MAXSCORE
+ return sum(score())
+ return feature
+
+def IsSingletonF(fphrase, ephrase, paircount, fcount, fsample_count):
+ return (fcount == 1)
+
+def IsSingletonFE(fphrase, ephrase, paircount, fcount, fsample_count):
+ return (paircount == 1)
+
+def IsNotSingletonF(fphrase, ephrase, paircount, fcount, fsample_count):
+ return (fcount > 1)
+
+def IsNotSingletonFE(fphrase, ephrase, paircount, fcount, fsample_count):
+ return (paircount > 1)
+
+def IsFEGreaterThanZero(fphrase, ephrase, paircount, fcount, fsample_count):
+    return (paircount > 0.01) # counts are integers, so this is effectively paircount > 0
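All feature functions share the signature (fphrase, ephrase, paircount, fcount, fsample_count), and the count-based ones ignore the phrase arguments entirely, which makes them easy to check by hand. A quick sanity check with made-up counts (assuming the module and its compiled sym dependency are importable):

    # Made-up counts: the (f, e) pair was seen 3 times among 10 samples of f,
    # and f itself was seen 5 times in the data.
    import features
    pc, fc, sc = 3, 5, 10
    print features.EgivenFCoherent(None, None, pc, fc, sc)  # -log10(3/10) ~ 0.523
    print features.CountEF(None, None, pc, fc, sc)          # log10(1 + 3) ~ 0.602
    print features.IsSingletonFE(None, None, pc, fc, sc)    # False (pc != 1)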