| field | value | date |
|---|---|---|
| author | Patrick Simianer <simianer@cl.uni-heidelberg.de> | 2012-08-01 17:32:37 +0200 |
| committer | Patrick Simianer <simianer@cl.uni-heidelberg.de> | 2012-08-01 17:32:37 +0200 |
| commit | 3f8e33cfe481a09c121a410e66a6074b5d05683e (patch) | |
| tree | a41ecaf0bbb69fa91a581623abe89d41219c04f8 /python/cdec | |
| parent | c139ce495861bb341e1b86a85ad4559f9ad53c14 (diff) | |
| parent | 9fe0219562e5db25171cce8776381600ff9a5649 (diff) | |
Merge remote-tracking branch 'upstream/master'
Diffstat (limited to 'python/cdec')
| mode | file | deletions |
|---|---|---|
| -rw-r--r-- | python/cdec/__init__.py | 2 |
| -rw-r--r-- | python/cdec/scfg/__init__.py | 1 |
| -rw-r--r-- | python/cdec/scfg/extractor.py | 112 |
| -rw-r--r-- | python/cdec/scfg/features.py | 62 |
| -rw-r--r-- | python/cdec/score.py | 1 |
5 files changed, 0 insertions, 178 deletions
```diff
diff --git a/python/cdec/__init__.py b/python/cdec/__init__.py
deleted file mode 100644
index 0d7b8782..00000000
--- a/python/cdec/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from _cdec import Decoder, Lattice
-import score
diff --git a/python/cdec/scfg/__init__.py b/python/cdec/scfg/__init__.py
deleted file mode 100644
index 6eb2f88f..00000000
--- a/python/cdec/scfg/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from extractor import GrammarExtractor
diff --git a/python/cdec/scfg/extractor.py b/python/cdec/scfg/extractor.py
deleted file mode 100644
index 0a45ddb8..00000000
--- a/python/cdec/scfg/extractor.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import StringIO
-from itertools import chain
-
-import clex
-import rulefactory
-import calignment
-import csuf
-import cdat
-import sym
-import log
-
-from features import EgivenFCoherent, SampleCountF, CountEF,\
-        MaxLexEgivenF, MaxLexFgivenE, IsSingletonF, IsSingletonFE
-from features import contextless
-
-log.level = -1
-
-class Output(StringIO.StringIO):
-    def close(self):
-        pass
-
-    def __str__(self):
-        return self.getvalue()
-
-def get_cn(sentence):
-    sentence = chain(('<s>',), sentence.split(), ('</s>',))
-    sentence = (sym.fromstring(word, terminal=True) for word in sentence)
-    return tuple(((word, None, 1), ) for word in sentence)
-
-class PhonyGrammar:
-    def add(self, thing):
-        pass
-
-class GrammarExtractor:
-    def __init__(self, config):
-        alignment = calignment.Alignment(config['a_file'], from_binary=True)
-        self.factory = rulefactory.HieroCachingRuleFactory(
-                # compiled alignment object (REQUIRED)
-                alignment=alignment,
-                # name of generic nonterminal used by Hiero
-                category="[X]",
-                # do not change for extraction
-                grammar=PhonyGrammar(), # TODO: set to None?
-                # maximum number of contiguous chunks of terminal symbols in RHS of a rule. If None, defaults to max_nonterminals+1
-                max_chunks=None,
-                # maximum span of a grammar rule in TEST DATA
-                max_initial_size=15,
-                # maximum number of symbols (both T and NT) allowed in a rule
-                max_length=config['max_len'],
-                # maximum number of nonterminals allowed in a rule (set >2 at your own risk)
-                max_nonterminals=config['max_nt'],
-                # maximum number of contiguous chunks of terminal symbols in target-side RHS of a rule. If None, defaults to max_nonterminals+1
-                max_target_chunks=None,
-                # maximum number of target side symbols (both T and NT) allowed in a rule. If None, defaults to max_initial_size
-                max_target_length=None,
-                # minimum span of a nonterminal in the RHS of a rule in TEST DATA
-                min_gap_size=1,
-                # filename of file containing precomputed collocations
-                precompute_file=config['precompute_file'],
-                # maximum frequency rank of patterns used to compute triples (don't set higher than 20)
-                precompute_secondary_rank=config['rank2'],
-                # maximum frequency rank of patterns used to compute collocations (no need to set higher than maybe 200-300)
-                precompute_rank=config['rank1'],
-                # require extracted rules to have at least one aligned word
-                require_aligned_terminal=True,
-                # require each contiguous chunk of extracted rules to have at least one aligned word
-                require_aligned_chunks=False,
-                # generate a complete grammar for each input sentence
-                per_sentence_grammar=True,
-                # maximum span of a grammar rule extracted from TRAINING DATA
-                train_max_initial_size=config['max_size'],
-                # minimum span of an RHS nonterminal in a rule extracted from TRAINING DATA
-                train_min_gap_size=config['min_gap'],
-                # True if phrases should be tight, False otherwise (False seems to give better results but is slower)
-                tight_phrases=True,
-                )
-        self.fsarray = csuf.SuffixArray(config['f_sa_file'], from_binary=True)
-        self.edarray = cdat.DataArray(config['e_file'], from_binary=True)
-
-        self.factory.registerContext(self)
-
-        # lower=faster, higher=better; improvements level off above 200-300 range, -1 = don't sample, use all data (VERY SLOW!)
-        self.sampler = rulefactory.Sampler(300)
-        self.sampler.registerContext(self)
-
-        # lexical weighting tables
-        tt = clex.CLex(config['lex_file'], from_binary=True)
-
-        self.models = (EgivenFCoherent, SampleCountF, CountEF,
-                MaxLexFgivenE(tt), MaxLexEgivenF(tt), IsSingletonF, IsSingletonFE)
-        self.models = tuple(contextless(feature) for feature in self.models)
-
-    def grammar(self, sentence):
-        if isinstance(sentence, unicode):
-            sentence = sentence.encode('utf8')
-        out = Output()
-        cn = get_cn(sentence)
-        self.factory.input(cn, output=out)
-        return str(out)
-
-def main(config):
-    sys.path.append(os.path.dirname(config))
-    module = __import__(os.path.basename(config).replace('.py', ''))
-    extractor = GrammarExtractor(module.__dict__)
-    print extractor.grammar(next(sys.stdin))
-
-if __name__ == '__main__':
-    import sys, os
-    if len(sys.argv) != 2 or not sys.argv[1].endswith('.py'):
-        sys.stderr.write('Usage: %s config.py\n' % sys.argv[0])
-        sys.exit(1)
-    main(*sys.argv[1:])
diff --git a/python/cdec/scfg/features.py b/python/cdec/scfg/features.py
deleted file mode 100644
index 6419cdd8..00000000
--- a/python/cdec/scfg/features.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from __future__ import division
-import math
-import sym
-
-def contextless(feature):
-    feature.compute_contextless_score = feature
-    return feature
-
-MAXSCORE = 99
-
-def EgivenF(fphrase, ephrase, paircount, fcount, fsample_count): # p(e|f)
-    return -math.log10(paircount/fcount)
-
-def CountEF(fphrase, ephrase, paircount, fcount, fsample_count):
-    return math.log10(1 + paircount)
-
-def SampleCountF(fphrase, ephrase, paircount, fcount, fsample_count):
-    return math.log10(1 + fsample_count)
-
-def EgivenFCoherent(fphrase, ephrase, paircount, fcount, fsample_count):
-    prob = paircount/fsample_count
-    return -math.log10(prob) if prob > 0 else MAXSCORE
-
-def CoherenceProb(fphrase, ephrase, paircount, fcount, fsample_count):
-    return -math.log10(fcount/fsample_count)
-
-def MaxLexEgivenF(ttable):
-    def feature(fphrase, ephrase, paircount, fcount, fsample_count):
-        fwords = [sym.tostring(w) for w in fphrase if not sym.isvar(w)] + ['NULL']
-        ewords = (sym.tostring(w) for w in ephrase if not sym.isvar(w))
-        def score():
-            for e in ewords:
-              maxScore = max(ttable.get_score(f, e, 0) for f in fwords)
-              yield -math.log10(maxScore) if maxScore > 0 else MAXSCORE
-        return sum(score())
-    return feature
-
-def MaxLexFgivenE(ttable):
-    def feature(fphrase, ephrase, paircount, fcount, fsample_count):
-        fwords = (sym.tostring(w) for w in fphrase if not sym.isvar(w))
-        ewords = [sym.tostring(w) for w in ephrase if not sym.isvar(w)] + ['NULL']
-        def score():
-            for f in fwords:
-              maxScore = max(ttable.get_score(f, e, 1) for e in ewords)
-              yield -math.log10(maxScore) if maxScore > 0 else MAXSCORE
-        return sum(score())
-    return feature
-
-def IsSingletonF(fphrase, ephrase, paircount, fcount, fsample_count):
-    return (fcount == 1)
-
-def IsSingletonFE(fphrase, ephrase, paircount, fcount, fsample_count):
-    return (paircount == 1)
-
-def IsNotSingletonF(fphrase, ephrase, paircount, fcount, fsample_count):
-    return (fcount > 1)
-
-def IsNotSingletonFE(fphrase, ephrase, paircount, fcount, fsample_count):
-    return (paircount > 1)
-
-def IsFEGreaterThanZero(fphrase, ephrase, paircount, fcount, fsample_count):
-    return (paircount > 0.01)
diff --git a/python/cdec/score.py b/python/cdec/score.py
deleted file mode 100644
index c107446f..00000000
--- a/python/cdec/score.py
+++ /dev/null
@@ -1 +0,0 @@
-from _cdec import BLEU, TER
```
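For context, the deleted `extractor.py` built a `GrammarExtractor` from a config whose keys are read in `__init__`. Below is a minimal usage sketch as the module existed before this merge; it is written in Python 2 to match the deleted code, and every file name in it is a hypothetical placeholder.

```python
# Hypothetical usage sketch of the GrammarExtractor removed above.
# The dict keys are exactly the ones GrammarExtractor.__init__ reads;
# all file paths are placeholders, not real artifacts.
from cdec.scfg import GrammarExtractor

config = {
    'a_file': 'training.align.bin',    # compiled alignment (calignment.Alignment)
    'f_sa_file': 'training.f.sa.bin',  # source-side suffix array (csuf.SuffixArray)
    'e_file': 'training.e.bin',        # target-side data array (cdat.DataArray)
    'lex_file': 'training.lex.bin',    # lexical weighting table (clex.CLex)
    'precompute_file': 'precomp.bin',  # precomputed collocations
    'max_len': 5,    # max symbols (terminals and nonterminals) per rule
    'max_nt': 2,     # max nonterminals per rule
    'max_size': 10,  # train_max_initial_size
    'min_gap': 1,    # train_min_gap_size
    'rank1': 100,    # precompute_rank
    'rank2': 10,     # precompute_secondary_rank
}

extractor = GrammarExtractor(config)
# grammar() wraps the sentence in <s>...</s>, builds a trivial confusion
# network, runs the cached rule factory, and returns the SCFG as a string.
print extractor.grammar('el gato come pescado')
```

The deleted `main()` did the same thing by importing a `config.py` and passing its `module.__dict__`; a plain dict behaves identically, since the constructor only performs key lookups.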
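The feature functions removed in `features.py` all share the signature `(fphrase, ephrase, paircount, fcount, fsample_count)` and return negative base-10 log scores, capped at `MAXSCORE` when a probability is zero. A short worked example with hypothetical counts makes the arithmetic concrete:

```python
# Worked example of the count-based features from the deleted features.py.
# The three counts below are made up for illustration.
import math

MAXSCORE = 99

paircount = 3        # times the pair (f, e) was extracted together
fcount = 12          # times f was extracted with any target side
fsample_count = 300  # times f was sampled (at most the Sampler(300) cap)

# EgivenFCoherent: -log10(paircount / fsample_count), MAXSCORE if zero.
prob = paircount / float(fsample_count)
score = -math.log10(prob) if prob > 0 else MAXSCORE
print score                      # -log10(3/300) = -log10(0.01) = 2.0

# CountEF: log10(1 + paircount)
print math.log10(1 + paircount)  # log10(4) ~= 0.602

# IsSingletonFE fires only when the pair was extracted exactly once.
print paircount == 1             # False
```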
