path: root/klm/alone/main.cc
blob: e09ab01d413e034e14229fd91cd866344e3c592c
#include "alone/threading.hh"
#include "lm/model.hh"
#include "search/config.hh"
#include "search/context.hh"
#include "util/exception.hh"
#include "util/file_piece.hh"
#include "util/usage.hh"

#include <boost/lexical_cast.hpp>

#include <iostream>
#include <memory>
#include <sstream>
#include <string>

namespace alone {

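// Reads hypergraph inputs named <graph_prefix>/0, <graph_prefix>/1, ... and
// hands each open file to the controller.  Stops at the first missing file
// (ENOENT); any other I/O error is propagated.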
template <class Control> void ReadLoop(const std::string &graph_prefix, Control &control) {
  for (unsigned int sentence = 0; ; ++sentence) {
    std::stringstream name;
    name << graph_prefix << '/' << sentence;
    std::auto_ptr<util::FilePiece> file;
    try {
      file.reset(new util::FilePiece(name.str().c_str()));
    } catch (const util::ErrnoException &e) {
      if (e.Error() == ENOENT) return;
      throw;
    }
    control.Add(file.release());
  }
}

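// Loads the language model, parses the feature weights, and builds the search
// configuration with the given pop limit, then decodes every hypergraph under
// graph_prefix: multi-threaded via Controller when compiled WITH_THREADS and
// more than one thread is requested, otherwise in the calling thread via
// InThread.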
template <class Model> void RunWithModelType(const char *graph_prefix, const char *model_file, StringPiece weight_str, unsigned int pop_limit, unsigned int threads) {
  Model model(model_file);
  search::Weights weights(weight_str);
  search::Config config(weights, pop_limit);

  if (threads > 1) {
#ifdef WITH_THREADS
    Controller<Model> controller(config, model, threads, std::cout);
    ReadLoop(graph_prefix, controller);
#else
    UTIL_THROW(util::Exception, "Threading support not compiled in.");
#endif
  } else {
    InThread<Model> controller(config, model, std::cout);
    ReadLoop(graph_prefix, controller);
  }
}

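// Detects the binary format of the language model file and dispatches to the
// matching KenLM model type; non-binary (ARPA) input defaults to PROBING.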
void Run(const char *graph_prefix, const char *lm_name, StringPiece weight_str, unsigned int pop_limit, unsigned int threads) {
  lm::ngram::ModelType model_type;
  if (!lm::ngram::RecognizeBinary(lm_name, model_type)) model_type = lm::ngram::PROBING;
  switch (model_type) {
    case lm::ngram::PROBING:
      RunWithModelType<lm::ngram::ProbingModel>(graph_prefix, lm_name, weight_str, pop_limit, threads);
      break;
    case lm::ngram::REST_PROBING:
      RunWithModelType<lm::ngram::RestProbingModel>(graph_prefix, lm_name, weight_str, pop_limit, threads);
      break;
    default:
      UTIL_THROW(util::Exception, "Sorry, this LM type isn't supported yet.");
  }
}

} // namespace alone

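// Usage: alone graph_prefix lm "weights" pop [threads]
//   graph_prefix  directory containing hypergraphs named 0, 1, 2, ...
//   lm            KenLM language model (binary or ARPA)
//   weights       feature weight string parsed by search::Weights
//   pop           pop limit passed to search::Config
//   threads       optional worker count; defaults to hardware concurrency
// Illustrative invocation (paths and weight names are hypothetical):
//   ./alone graphs model.binlm "LanguageModel=1.0 WordPenalty=-0.5" 1000 4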
int main(int argc, char *argv[]) {
  if (argc < 5 || argc > 6) {
    std::cerr << argv[0] << " graph_prefix lm \"weights\" pop [threads]" << std::endl;
    return 1;
  }

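  // Default to the number of hardware threads when threading support is
  // compiled in; the optional threads argument (argv[5]) overrides it.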
#ifdef WITH_THREADS
  unsigned thread_count = boost::thread::hardware_concurrency();
#else
  unsigned thread_count = 1;
#endif
  if (argc == 6) {
    thread_count = boost::lexical_cast<unsigned>(argv[5]);
    UTIL_THROW_IF(!thread_count, util::Exception, "Thread count 0");
  }
  UTIL_THROW_IF(!thread_count, util::Exception, "Boost doesn't know how many threads there are.  Pass it on the command line.");
  alone::Run(argv[1], argv[2], argv[3], boost::lexical_cast<unsigned int>(argv[4]), thread_count);

  util::PrintUsage(std::cerr);
  return 0;
}