author    Victor Chahuneau <vchahune@cs.cmu.edu>  2012-07-27 01:16:03 -0400
committer Victor Chahuneau <vchahune@cs.cmu.edu>  2012-07-27 01:16:03 -0400
commit    b2a8bccb2bd713d9ec081cf3dad0162c2cb492d8 (patch)
tree      c661044fd2a3943cf2ad12109b916fd7b56a519e /python/cdec/scfg
parent    148b1168c2b07abf0c7757a31141377c28ec3d91 (diff)
[python] Fork of the suffix-array extractor with surface improvements

Available as the cdec.sa module, with command-line helpers:

    python -m cdec.sa.compile -f ... -e ... -a ... -o sa-out/ -c extract.ini
    python -m cdec.sa.extract -c extract.ini -g grammars-out/ < input.txt > input.sgml

+ renamed cdec.scfg -> cdec.sa
+ Python README
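A minimal sketch of the new module's programmatic use, assuming cdec.sa re-exports GrammarExtractor the way the deleted cdec/scfg/__init__.py below did; 'extract.ini' is a placeholder for the config written by cdec.sa.compile:

    # Hedged sketch, not part of this commit: per-sentence extraction from
    # Python, mirroring what `python -m cdec.sa.extract` does on the command
    # line. Assumes cdec.sa exposes a GrammarExtractor entry point.
    import sys
    import cdec.sa

    extractor = cdec.sa.GrammarExtractor('extract.ini')  # placeholder path
    for sentence in sys.stdin:
        sys.stdout.write(extractor.grammar(sentence.strip()))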
Diffstat (limited to 'python/cdec/scfg')
-rw-r--r--  python/cdec/scfg/__init__.py  |   1 -
-rw-r--r--  python/cdec/scfg/extractor.py | 120 -
-rw-r--r--  python/cdec/scfg/features.py  |  62 -
3 files changed, 0 insertions(+), 183 deletions(-)
diff --git a/python/cdec/scfg/__init__.py b/python/cdec/scfg/__init__.py
deleted file mode 100644
index 6eb2f88f..00000000
--- a/python/cdec/scfg/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from extractor import GrammarExtractor
diff --git a/python/cdec/scfg/extractor.py b/python/cdec/scfg/extractor.py
deleted file mode 100644
index 1dfa2421..00000000
--- a/python/cdec/scfg/extractor.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import sys, os
-import re
-import StringIO
-from itertools import chain
-
-import clex
-import rulefactory
-import calignment
-import csuf
-import cdat
-import sym
-import log
-
-from features import EgivenFCoherent, SampleCountF, CountEF,\
- MaxLexEgivenF, MaxLexFgivenE, IsSingletonF, IsSingletonFE
-from features import contextless
-
-log.level = -1
-
-class Output(StringIO.StringIO):
- def close(self):
- pass
-
- def __str__(self):
- return self.getvalue()
-
-def get_cn(sentence):
- sentence = chain(('<s>',), sentence.split(), ('</s>',))
- sentence = (sym.fromstring(word, terminal=True) for word in sentence)
- return tuple(((word, None, 1), ) for word in sentence)
-
-class PhonyGrammar:
- def add(self, thing):
- pass
-
-class GrammarExtractor:
- def __init__(self, cfg):
- if isinstance(cfg, dict):
- config = cfg
- elif isinstance(cfg, str):
- cfg_file = os.path.basename(cfg)
- if not re.match(r'^\w+\.py$', cfg_file):
- raise ValueError('Config must be a *.py file')
- sys.path.append(os.path.dirname(cfg))
- config = __import__(cfg_file.replace('.py', '')).__dict__
- sys.path.pop()
- alignment = calignment.Alignment(config['a_file'], from_binary=True)
- self.factory = rulefactory.HieroCachingRuleFactory(
- # compiled alignment object (REQUIRED)
- alignment=alignment,
- # name of generic nonterminal used by Hiero
- category="[X]",
- # do not change for extraction
- grammar=PhonyGrammar(), # TODO: set to None?
- # maximum number of contiguous chunks of terminal symbols in RHS of a rule. If None, defaults to max_nonterminals+1
- max_chunks=None,
- # maximum span of a grammar rule in TEST DATA
- max_initial_size=15,
- # maximum number of symbols (both T and NT) allowed in a rule
- max_length=config['max_len'],
- # maximum number of nonterminals allowed in a rule (set >2 at your own risk)
- max_nonterminals=config['max_nt'],
- # maximum number of contiguous chunks of terminal symbols in target-side RHS of a rule. If None, defaults to max_nonterminals+1
- max_target_chunks=None,
- # maximum number of target side symbols (both T and NT) allowed in a rule. If None, defaults to max_initial_size
- max_target_length=None,
- # minimum span of a nonterminal in the RHS of a rule in TEST DATA
- min_gap_size=1,
- # filename of file containing precomputed collocations
- precompute_file=config['precompute_file'],
- # maximum frequency rank of patterns used to compute triples (don't set higher than 20).
- precompute_secondary_rank=config['rank2'],
- # maximum frequency rank of patterns used to compute collocations (no need to set higher than maybe 200-300)
- precompute_rank=config['rank1'],
- # require extracted rules to have at least one aligned word
- require_aligned_terminal=True,
- # require each contiguous chunk of extracted rules to have at least one aligned word
- require_aligned_chunks=False,
- # generate a complete grammar for each input sentence
- per_sentence_grammar=True,
- # maximum span of a grammar rule extracted from TRAINING DATA
- train_max_initial_size=config['max_size'],
- # minimum span of an RHS nonterminal in a rule extracted from TRAINING DATA
- train_min_gap_size=config['min_gap'],
- # True if phrases should be tight, False otherwise (False seems to give better results but is slower)
- tight_phrases=True,
- )
- self.fsarray = csuf.SuffixArray(config['f_sa_file'], from_binary=True)
- self.edarray = cdat.DataArray(config['e_file'], from_binary=True)
-
- self.factory.registerContext(self)
-
- # lower=faster, higher=better; improvements level off above 200-300 range, -1 = don't sample, use all data (VERY SLOW!)
- self.sampler = rulefactory.Sampler(300)
- self.sampler.registerContext(self)
-
- # lexical weighting tables
- tt = clex.CLex(config['lex_file'], from_binary=True)
-
- self.models = (EgivenFCoherent, SampleCountF, CountEF,
- MaxLexFgivenE(tt), MaxLexEgivenF(tt), IsSingletonF, IsSingletonFE)
- self.models = tuple(contextless(feature) for feature in self.models)
-
- def grammar(self, sentence):
- if isinstance(sentence, unicode):
- sentence = sentence.encode('utf8')
- out = Output()
- cn = get_cn(sentence)
- self.factory.input(cn, output=out)
- return str(out)
-
-def main(config):
- extractor = GrammarExtractor(config)
- sys.stdout.write(extractor.grammar(next(sys.stdin)))
-
-if __name__ == '__main__':
- if len(sys.argv) != 2 or not sys.argv[1].endswith('.py'):
- sys.stderr.write('Usage: %s config.py\n' % sys.argv[0])
- sys.exit(1)
- main(*sys.argv[1:])
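For reference, the deleted extractor above pulls these names out of its *.py config; a minimal illustrative config.py, where every path is a placeholder for a precompiled binary and the numbers only echo the ranges suggested by the inline comments:

    # Illustrative config.py for the extractor above (all paths hypothetical).
    a_file          = 'corpus.a.bin'        # compiled alignment (calignment)
    f_sa_file       = 'corpus.f.sa.bin'     # source-side suffix array (csuf)
    e_file          = 'corpus.e.bin'        # target-side data array (cdat)
    lex_file        = 'corpus.lex.bin'      # lexical weighting table (clex)
    precompute_file = 'corpus.precomp.bin'  # precomputed collocations
    max_len  = 5     # max symbols (T and NT) per rule
    max_nt   = 2     # max nonterminals per rule
    max_size = 10    # max rule span in training data
    min_gap  = 1     # min nonterminal span in training data
    rank1    = 100   # precompute_rank (no need to go above ~200-300)
    rank2    = 10    # precompute_secondary_rank (don't set above 20)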
diff --git a/python/cdec/scfg/features.py b/python/cdec/scfg/features.py
deleted file mode 100644
index 6419cdd8..00000000
--- a/python/cdec/scfg/features.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from __future__ import division
-import math
-import sym
-
-def contextless(feature):
- feature.compute_contextless_score = feature
- return feature
-
-MAXSCORE = 99
-
-def EgivenF(fphrase, ephrase, paircount, fcount, fsample_count): # p(e|f)
- return -math.log10(paircount/fcount)
-
-def CountEF(fphrase, ephrase, paircount, fcount, fsample_count):
- return math.log10(1 + paircount)
-
-def SampleCountF(fphrase, ephrase, paircount, fcount, fsample_count):
- return math.log10(1 + fsample_count)
-
-def EgivenFCoherent(fphrase, ephrase, paircount, fcount, fsample_count):
- prob = paircount/fsample_count
- return -math.log10(prob) if prob > 0 else MAXSCORE
-
-def CoherenceProb(fphrase, ephrase, paircount, fcount, fsample_count):
- return -math.log10(fcount/fsample_count)
-
-def MaxLexEgivenF(ttable):
- def feature(fphrase, ephrase, paircount, fcount, fsample_count):
- fwords = [sym.tostring(w) for w in fphrase if not sym.isvar(w)] + ['NULL']
- ewords = (sym.tostring(w) for w in ephrase if not sym.isvar(w))
- def score():
- for e in ewords:
- maxScore = max(ttable.get_score(f, e, 0) for f in fwords)
- yield -math.log10(maxScore) if maxScore > 0 else MAXSCORE
- return sum(score())
- return feature
-
-def MaxLexFgivenE(ttable):
- def feature(fphrase, ephrase, paircount, fcount, fsample_count):
- fwords = (sym.tostring(w) for w in fphrase if not sym.isvar(w))
- ewords = [sym.tostring(w) for w in ephrase if not sym.isvar(w)] + ['NULL']
- def score():
- for f in fwords:
- maxScore = max(ttable.get_score(f, e, 1) for e in ewords)
- yield -math.log10(maxScore) if maxScore > 0 else MAXSCORE
- return sum(score())
- return feature
-
-def IsSingletonF(fphrase, ephrase, paircount, fcount, fsample_count):
- return (fcount == 1)
-
-def IsSingletonFE(fphrase, ephrase, paircount, fcount, fsample_count):
- return (paircount == 1)
-
-def IsNotSingletonF(fphrase, ephrase, paircount, fcount, fsample_count):
- return (fcount > 1)
-
-def IsNotSingletonFE(fphrase, ephrase, paircount, fcount, fsample_count):
- return (paircount > 1)
-
-def IsFEGreaterThanZero(fphrase, ephrase, paircount, fcount, fsample_count):
- return (paircount > 0.01)
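Every scorer in the deleted features.py shares the same five-argument signature, so adding a feature meant writing a plain function and wrapping it with contextless; a hypothetical addition in the same style:

    from __future__ import division
    import math

    from features import contextless  # same import style as extractor.py above

    # Hypothetical extra feature following the module's five-argument
    # signature: log-scaled source phrase count, by analogy with CountEF
    # and SampleCountF above.
    @contextless
    def CountF(fphrase, ephrase, paircount, fcount, fsample_count):
        return math.log10(1 + fcount)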