1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
|
#include "lm/enumerate_vocab.hh"
#include "lm/model.hh"

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>

#include <sys/resource.h>
#include <sys/time.h>
// Convert a struct timeval to seconds as a float.
// tv_usec holds MICROseconds, so the correct divisor is 1e6; the original
// divided by 1e9, under-counting the fractional part by a factor of 1000.
float FloatSec(const struct timeval &tv) {
  return static_cast<float>(tv.tv_sec) + (static_cast<float>(tv.tv_usec) / 1000000.0);
}
// Report resource usage to stderr, prefixed by |message|: user/sys CPU
// time from getrusage, plus resident set size scraped from
// /proc/self/status (getrusage does not populate memory usage on Linux).
void PrintUsage(const char *message) {
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage)) {
    perror("getrusage");
    return;
  }
  std::cerr << message
            << "user\t" << FloatSec(usage.ru_utime)
            << "\nsys\t" << FloatSec(usage.ru_stime) << '\n';
  // Linux doesn't set memory usage in rusage, so pull VmRSS out of /proc.
  std::ifstream status("/proc/self/status", std::ios::in);
  for (std::string line; getline(status, line);) {
    if (line.compare(0, 7, "VmRSS:\t") == 0) {
      std::cerr << "rss " << (line.c_str() + 7) << '\n';
      break;
    }
  }
}
// Read sentences from stdin (one per line) and score each word with |model|,
// printing "word=vocab_id ngram_length prob" per token, an end-of-sentence
// score, and the sentence total. Resource usage is reported before and after.
template <class Model> void Query(const Model &model) {
PrintUsage("Loading statistics:\n");
typename Model::State state, out;
lm::FullScoreReturn ret;
std::string word;
while (std::cin) {
// Each sentence starts from the begin-of-sentence context.
state = model.BeginSentenceState();
float total = 0.0;
bool got = false;
while (std::cin >> word) {
got = true;
lm::WordIndex vocab = model.GetVocabulary().Index(word);
// Score |word| given |state|; |out| receives the successor context.
ret = model.FullScore(state, vocab, out);
total += ret.prob;
std::cout << word << '=' << vocab << ' ' << static_cast<unsigned int>(ret.ngram_length) << ' ' << ret.prob << '\n';
state = out;
// operator>> stopped at whitespace; if the next char is a newline the
// sentence is over. Otherwise the char consumed here was a space/tab
// that >> would have skipped anyway.
if (std::cin.get() == '\n') break;
}
// EOF with no words read on this iteration: nothing left to score.
if (!got && !std::cin) break;
// Close the sentence with the end-of-sentence token.
ret = model.FullScore(state, model.GetVocabulary().EndSentence(), out);
total += ret.prob;
std::cout << "</s>=" << model.GetVocabulary().EndSentence() << ' ' << static_cast<unsigned int>(ret.ngram_length) << ' ' << ret.prob << '\n';
std::cout << "Total: " << total << '\n';
}
PrintUsage("After queries:\n");
}
// Vocabulary-enumeration callback: during model loading, echoes every
// (index, word) pair it is handed to stderr.
class PrintVocab : public lm::ngram::EnumerateVocab {
  public:
    void Add(lm::WordIndex index, const StringPiece &str) {
      std::ostream &log = std::cerr;
      log << "vocab " << index;
      log << ' ' << str << '\n';
    }
};
// Load the model stored at |name| (printing its vocabulary to stderr as it
// loads) and then run stdin queries against it.
template <class Model> void Query(const char *name) {
  PrintVocab printer;  // must stay alive while the model constructor runs
  lm::ngram::Config config;
  config.enumerate_vocab = &printer;  // request vocab callbacks during load
  Model model(name, config);
  Query(model);
}
// Entry point: argv[1] names a language model. Binary kenlm files are
// dispatched to the matching model type; anything else is treated as ARPA
// text and loaded with the probing model.
int main(int argc, char *argv[]) {
  if (argc < 2) {
    std::cerr << "Pass language model name." << std::endl;
    // Missing required argument is a usage error; the original returned 0
    // (success) here, which hides the failure from shell scripts.
    return 1;
  }
  lm::ngram::ModelType model_type;
  if (lm::ngram::RecognizeBinary(argv[1], model_type)) {
    // Binary files record which data structure they contain.
    switch (model_type) {
      case lm::ngram::HASH_PROBING:
        Query<lm::ngram::ProbingModel>(argv[1]);
        break;
      case lm::ngram::HASH_SORTED:
        Query<lm::ngram::SortedModel>(argv[1]);
        break;
      case lm::ngram::TRIE_SORTED:
        Query<lm::ngram::TrieModel>(argv[1]);
        break;
      default:
        std::cerr << "Unrecognized kenlm model type " << model_type << std::endl;
        abort();
    }
  } else {
    // Not a recognized binary: assume ARPA text, which probing can load.
    Query<lm::ngram::ProbingModel>(argv[1]);
  }
  PrintUsage("Total time including destruction:\n");
  return 0;
}
|