summaryrefslogtreecommitdiff
path: root/klm/lm/model.hh
diff options
context:
space:
mode:
authorKenneth Heafield <kenlm@kheafield.com>2011-06-26 18:40:15 -0400
committerPatrick Simianer <p@simianer.de>2011-09-23 19:13:57 +0200
commit9308fcdbdbe6564d25fb848139d3f55254d491be (patch)
tree67fdaa819488e231b5d70b2227527510571f2108 /klm/lm/model.hh
parent83a6ea9b5a4064a83035b505350618fbf81baeff (diff)
Quantization
Diffstat (limited to 'klm/lm/model.hh')
-rw-r--r--klm/lm/model.hh21
1 file changed, 14 insertions, 7 deletions
diff --git a/klm/lm/model.hh b/klm/lm/model.hh
index b85ccdcc..1f49a382 100644
--- a/klm/lm/model.hh
+++ b/klm/lm/model.hh
@@ -5,6 +5,7 @@
#include "lm/config.hh"
#include "lm/facade.hh"
#include "lm/max_order.hh"
+#include "lm/quantize.hh"
#include "lm/search_hashed.hh"
#include "lm/search_trie.hh"
#include "lm/vocab.hh"
@@ -70,9 +71,10 @@ template <class Search, class VocabularyT> class GenericModel : public base::Mod
private:
typedef base::ModelFacade<GenericModel<Search, VocabularyT>, State, VocabularyT> P;
public:
- // Get the size of memory that will be mapped given ngram counts. This
- // does not include small non-mapped control structures, such as this class
- // itself.
+ /* Get the size of memory that will be mapped given ngram counts. This
+ * does not include small non-mapped control structures, such as this class
+ * itself.
+ */
static size_t Size(const std::vector<uint64_t> &counts, const Config &config = Config());
/* Load the model from a file. It may be an ARPA or binary file. Binary
@@ -111,6 +113,11 @@ template <class Search, class VocabularyT> class GenericModel : public base::Mod
private:
friend void LoadLM<>(const char *file, const Config &config, GenericModel<Search, VocabularyT> &to);
+ static void UpdateConfigFromBinary(int fd, const std::vector<uint64_t> &counts, Config &config) {
+ AdvanceOrThrow(fd, VocabularyT::Size(counts[0], config));
+ Search::UpdateConfigFromBinary(fd, counts, config);
+ }
+
float SlowBackoffLookup(const WordIndex *const context_rbegin, const WordIndex *const context_rend, unsigned char start) const;
FullScoreReturn ScoreExceptBackoff(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, State &out_state) const;
@@ -130,9 +137,7 @@ template <class Search, class VocabularyT> class GenericModel : public base::Mod
VocabularyT vocab_;
- typedef typename Search::Unigram Unigram;
typedef typename Search::Middle Middle;
- typedef typename Search::Longest Longest;
Search search_;
};
@@ -141,13 +146,15 @@ template <class Search, class VocabularyT> class GenericModel : public base::Mod
// These must also be instantiated in the cc file.
typedef ::lm::ngram::ProbingVocabulary Vocabulary;
-typedef detail::GenericModel<detail::ProbingHashedSearch, Vocabulary> ProbingModel;
+typedef detail::GenericModel<detail::ProbingHashedSearch, Vocabulary> ProbingModel; // HASH_PROBING
// Default implementation. No real reason for it to be the default.
typedef ProbingModel Model;
// Smaller implementation.
typedef ::lm::ngram::SortedVocabulary SortedVocabulary;
-typedef detail::GenericModel<trie::TrieSearch, SortedVocabulary> TrieModel;
+typedef detail::GenericModel<trie::TrieSearch<DontQuantize>, SortedVocabulary> TrieModel; // TRIE_SORTED
+
+typedef detail::GenericModel<trie::TrieSearch<SeparatelyQuantize>, SortedVocabulary> QuantTrieModel; // QUANT_TRIE_SORTED
} // namespace ngram
} // namespace lm