author    Chris Dyer <cdyer@cs.cmu.edu>  2011-10-14 11:51:12 +0100
committer Chris Dyer <cdyer@cs.cmu.edu>  2011-10-14 11:51:12 +0100
commit    708e8220d2cbc41fbbf2e8a713edf1d2cf3bc767 (patch)
tree      cb605fc7f1c9031f7adc0e74f0bd6b3817f8a065 /decoder/ff_lm.cc
parent    575cd9f9102596b396ac8e7ab4f0c74b35d369e6 (diff)
remove FSA integration code. will have to be resurrected another day
Diffstat (limited to 'decoder/ff_lm.cc')
-rw-r--r--  decoder/ff_lm.cc | 48
1 file changed, 0 insertions(+), 48 deletions(-)
diff --git a/decoder/ff_lm.cc b/decoder/ff_lm.cc
index afa36b96..5e16d4e3 100644
--- a/decoder/ff_lm.cc
+++ b/decoder/ff_lm.cc
@@ -46,7 +46,6 @@ char const* usage_verbose="-n determines the name of the feature (and its weight
#endif
#include "ff_lm.h"
-#include "ff_lm_fsa.h"
#include <sstream>
#include <unistd.h>
@@ -69,10 +68,6 @@ char const* usage_verbose="-n determines the name of the feature (and its weight
using namespace std;
-string LanguageModelFsa::usage(bool param,bool verbose) {
- return FeatureFunction::usage_helper("LanguageModelFsa",usage_short,usage_verbose,param,verbose);
-}
-
string LanguageModel::usage(bool param,bool verbose) {
return FeatureFunction::usage_helper(usage_name,usage_short,usage_verbose,param,verbose);
}
@@ -524,49 +519,6 @@ LanguageModel::LanguageModel(const string& param) {
SetStateSize(LanguageModelImpl::OrderToStateSize(order));
}
-//TODO: decide whether to waste a word of space so states are always null-terminated for SRILM. otherwise we have to copy
-void LanguageModelFsa::set_ngram_order(int i) {
- assert(i>0);
- ngram_order_=i;
- ctxlen_=i-1;
- set_state_bytes(ctxlen_*sizeof(WordID));
- WordID *ss=(WordID*)start.begin();
- WordID *hs=(WordID*)h_start.begin();
- if (ctxlen_) { // avoid segfault in case of unigram lm (0 state)
- set_end_phrase(TD::Convert("</s>"));
-// se is pretty boring in unigram case, just adds constant prob. check that this is what we want
- ss[0]=TD::Convert("<s>"); // start-sentence context (length 1)
- hs[0]=0; // empty context
- for (int i=1;i<ctxlen_;++i) {
- ss[i]=hs[i]=0; // need this so storage is initialized for hashing.
- //TODO: reevaluate whether state space comes cleared by allocator or not.
- }
- }
- sync(); // for dynamic markov_order copy etc
-}
-
-LanguageModelFsa::LanguageModelFsa(string const& param) {
- int lmorder;
- pimpl_ = make_lm_impl(param,&lmorder,&fid_);
- Init();
- floor_=pimpl_->floor_;
- set_ngram_order(lmorder);
-}
-
-void LanguageModelFsa::print_state(ostream &o,void const* st) const {
- WordID const *wst=(WordID const*)st;
- o<<'[';
- bool sp=false;
- for (int i=ctxlen_;i>0;sp=true) {
- --i;
- WordID w=wst[i];
- if (w==0) continue;
- if (sp) o<<' ';
- o << TD::Convert(w);
- }
- o<<']';
-}
-
Features LanguageModel::features() const {
return single_feature(fid_);
}