From 8cd3280391e6b1ba83357d3967638873a8c0920c Mon Sep 17 00:00:00 2001
From: redpony
Date: Mon, 22 Nov 2010 23:00:34 +0000
Subject: faster alignment mode when full translation inference is not required

git-svn-id: https://ws10smt.googlecode.com/svn/trunk@731 ec762483-ff6d-05da-a07a-a48fb63a330f
---
 decoder/decoder.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/decoder/decoder.cc b/decoder/decoder.cc
index daf82f10..f47b7385 100644
--- a/decoder/decoder.cc
+++ b/decoder/decoder.cc
@@ -368,6 +368,7 @@ DecoderImpl::DecoderImpl(po::variables_map& conf, int argc, char** argv, istream
         ("scale_prune_srclen", "scale beams by the input length (in # of tokens; may not be what you want for lattices")
         ("promise_power",po::value<double>()->default_value(0), "Give more beam budget to more promising previous-pass nodes when pruning - but allocate the same average beams. 0 means off, 1 means beam proportional to inside*outside prob, n means nth power (affects just --cubepruning_pop_limit). note: for the same pop_limit, this gives more search error unless very close to 0 (recommend disabled; even 0.01 is slightly worse than 0) which is a bad sign and suggests this isn't doing a good job; further it's slightly slower to LM cube rescore with 0.01 compared to 0, as well as giving (very insignificantly) lower BLEU. TODO: test under more conditions, or try idea with different formula, or prob. cube beams.")
         ("lextrans_use_null", "Support source-side null words in lexical translation")
+        ("lextrans_align_only", "Only used in alignment mode. Limit target words generated by reference")
         ("tagger_tagset,t", po::value<string>(), "(Tagger) file containing tag set")
         ("csplit_output_plf", "(Compound splitter) Output lattice in PLF format")
         ("csplit_preserve_full_word", "(Compound splitter) Always include the unsegmented form in the output lattice")
--
cgit v1.2.3
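The new `lextrans_align_only` option is a presence-only switch registered with boost::program_options, like the neighboring flags in this block. The diff does not show where the decoder consumes it, so the sketch below only illustrates, under stated assumptions, how such a switch is typically declared and tested via `variables_map::count`; it is not the cdec implementation, and the surrounding option names are copied from the diff purely for context.

```cpp
// Minimal standalone sketch (not cdec code): register a presence-only
// switch like --lextrans_align_only and check whether it was given.
#include <boost/program_options.hpp>
#include <iostream>

namespace po = boost::program_options;

int main(int argc, char** argv) {
  po::options_description opts("Lexical translation options (illustrative)");
  opts.add_options()
    ("lextrans_use_null", "Support source-side null words in lexical translation")
    ("lextrans_align_only", "Only used in alignment mode. Limit target words generated by reference");

  po::variables_map conf;
  po::store(po::parse_command_line(argc, argv, opts), conf);
  po::notify(conf);

  // A switch with no value: count() is nonzero iff the flag appeared
  // on the command line. A consumer (hypothetical here) could use this
  // to restrict generated target words to those in the reference.
  const bool align_only = conf.count("lextrans_align_only") > 0;
  std::cout << "align_only=" << (align_only ? "true" : "false") << "\n";
  return 0;
}
```

Invoked as `./a.out --lextrans_align_only`, this prints `align_only=true`; without the flag it prints `false`, mirroring how the decoder would branch on the option.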