summaryrefslogtreecommitdiff
path: root/klm/lm/search_hashed.cc
diff options
context:
space:
mode:
authorPatrick Simianer <simianer@cl.uni-heidelberg.de>2012-05-31 13:57:24 +0200
committerPatrick Simianer <simianer@cl.uni-heidelberg.de>2012-05-31 13:57:24 +0200
commitf1ba05780db1705493d9afb562332498b93d26f1 (patch)
treefb429a657ba97f33e8140742de9bc74d9fc88e75 /klm/lm/search_hashed.cc
parentaadabfdf37dfd451485277cb77fad02f77b361c6 (diff)
parent317d650f6cb1e24ac6f3be6f7bf9d4246a59e0e5 (diff)
Merge remote-tracking branch 'upstream/master'
Diffstat (limited to 'klm/lm/search_hashed.cc')
-rw-r--r--klm/lm/search_hashed.cc243
1 files changed, 173 insertions, 70 deletions
diff --git a/klm/lm/search_hashed.cc b/klm/lm/search_hashed.cc
index 1d6fb5be..13942309 100644
--- a/klm/lm/search_hashed.cc
+++ b/klm/lm/search_hashed.cc
@@ -3,7 +3,9 @@
#include "lm/binary_format.hh"
#include "lm/blank.hh"
#include "lm/lm_exception.hh"
+#include "lm/model.hh"
#include "lm/read_arpa.hh"
+#include "lm/value.hh"
#include "lm/vocab.hh"
#include "util/bit_packing.hh"
@@ -14,6 +16,8 @@
namespace lm {
namespace ngram {
+class ProbingModel;
+
namespace {
/* These are passed to ReadNGrams so that n-grams with zero backoff that appear as context will still be used in state. */
@@ -37,9 +41,9 @@ template <class Middle> class ActivateLowerMiddle {
Middle &modify_;
};
-class ActivateUnigram {
+template <class Weights> class ActivateUnigram {
public:
- explicit ActivateUnigram(ProbBackoff *unigram) : modify_(unigram) {}
+ explicit ActivateUnigram(Weights *unigram) : modify_(unigram) {}
void operator()(const WordIndex *vocab_ids, const unsigned int /*n*/) {
// assert(n == 2);
@@ -47,43 +51,124 @@ class ActivateUnigram {
}
private:
- ProbBackoff *modify_;
+ Weights *modify_;
};
-template <class Middle> void FixSRI(int lower, float negative_lower_prob, unsigned int n, const uint64_t *keys, const WordIndex *vocab_ids, ProbBackoff *unigrams, std::vector<Middle> &middle) {
- ProbBackoff blank;
- blank.backoff = kNoExtensionBackoff;
- // Fix SRI's stupidity.
- // Note that negative_lower_prob is the negative of the probability (so it's currently >= 0). We still want the sign bit off to indicate left extension, so I just do -= on the backoffs.
- blank.prob = negative_lower_prob;
- // An entry was found at lower (order lower + 2).
- // We need to insert blanks starting at lower + 1 (order lower + 3).
- unsigned int fix = static_cast<unsigned int>(lower + 1);
- uint64_t backoff_hash = detail::CombineWordHash(static_cast<uint64_t>(vocab_ids[1]), vocab_ids[2]);
- if (fix == 0) {
- // Insert a missing bigram.
- blank.prob -= unigrams[vocab_ids[1]].backoff;
- SetExtension(unigrams[vocab_ids[1]].backoff);
- // Bigram including a unigram's backoff
- middle[0].Insert(detail::ProbBackoffEntry::Make(keys[0], blank));
- fix = 1;
- } else {
- for (unsigned int i = 3; i < fix + 2; ++i) backoff_hash = detail::CombineWordHash(backoff_hash, vocab_ids[i]);
+// Find the lower order entry, inserting blanks along the way as necessary.
+template <class Value> void FindLower(
+ const std::vector<uint64_t> &keys,
+ typename Value::Weights &unigram,
+ std::vector<util::ProbingHashTable<typename Value::ProbingEntry, util::IdentityHash> > &middle,
+ std::vector<typename Value::Weights *> &between) {
+ typename util::ProbingHashTable<typename Value::ProbingEntry, util::IdentityHash>::MutableIterator iter;
+ typename Value::ProbingEntry entry;
+ // Backoff will always be 0.0. We'll get the probability and rest in another pass.
+ entry.value.backoff = kNoExtensionBackoff;
+ // Go back and find the longest right-aligned entry, informing it that it extends left. Normally this will match immediately, but sometimes SRI is dumb.
+ for (int lower = keys.size() - 2; ; --lower) {
+ if (lower == -1) {
+ between.push_back(&unigram);
+ return;
+ }
+ entry.key = keys[lower];
+ bool found = middle[lower].FindOrInsert(entry, iter);
+ between.push_back(&iter->value);
+ if (found) return;
+ }
+}
+
+// Between usually has a single entry, the value to adjust. But sometimes SRI stupidly pruned entries so it has uninitialized blank values to be set here.
+template <class Added, class Build> void AdjustLower(
+ const Added &added,
+ const Build &build,
+ std::vector<typename Build::Value::Weights *> &between,
+ const unsigned int n,
+ const std::vector<WordIndex> &vocab_ids,
+ typename Build::Value::Weights *unigrams,
+ std::vector<util::ProbingHashTable<typename Build::Value::ProbingEntry, util::IdentityHash> > &middle) {
+ typedef typename Build::Value Value;
+ if (between.size() == 1) {
+ build.MarkExtends(*between.front(), added);
+ return;
+ }
+ typedef util::ProbingHashTable<typename Value::ProbingEntry, util::IdentityHash> Middle;
+ float prob = -fabs(between.back()->prob);
+ // Order of the n-gram on which probabilities are based.
+ unsigned char basis = n - between.size();
+ assert(basis != 0);
+ typename Build::Value::Weights **change = &between.back();
+ // Skip the basis.
+ --change;
+ if (basis == 1) {
+ // Hallucinate a bigram based on a unigram's backoff and a unigram probability.
+ float &backoff = unigrams[vocab_ids[1]].backoff;
+ SetExtension(backoff);
+ prob += backoff;
+ (*change)->prob = prob;
+ build.SetRest(&*vocab_ids.begin(), 2, **change);
+ basis = 2;
+ --change;
}
- // fix >= 1. Insert trigrams and above.
- for (; fix <= n - 3; ++fix) {
+ uint64_t backoff_hash = static_cast<uint64_t>(vocab_ids[1]);
+ for (unsigned char i = 2; i <= basis; ++i) {
+ backoff_hash = detail::CombineWordHash(backoff_hash, vocab_ids[i]);
+ }
+ for (; basis < n - 1; ++basis, --change) {
typename Middle::MutableIterator gotit;
- if (middle[fix - 1].UnsafeMutableFind(backoff_hash, gotit)) {
+ if (middle[basis - 2].UnsafeMutableFind(backoff_hash, gotit)) {
float &backoff = gotit->value.backoff;
SetExtension(backoff);
- blank.prob -= backoff;
+ prob += backoff;
}
- middle[fix].Insert(detail::ProbBackoffEntry::Make(keys[fix], blank));
- backoff_hash = detail::CombineWordHash(backoff_hash, vocab_ids[fix + 2]);
+ (*change)->prob = prob;
+ build.SetRest(&*vocab_ids.begin(), basis + 1, **change);
+ backoff_hash = detail::CombineWordHash(backoff_hash, vocab_ids[basis+1]);
+ }
+
+ typename std::vector<typename Value::Weights *>::const_iterator i(between.begin());
+ build.MarkExtends(**i, added);
+ const typename Value::Weights *longer = *i;
+ // Everything has probability but is not marked as extending.
+ for (++i; i != between.end(); ++i) {
+ build.MarkExtends(**i, *longer);
+ longer = *i;
}
}
-template <class Voc, class Store, class Middle, class Activate> void ReadNGrams(util::FilePiece &f, const unsigned int n, const size_t count, const Voc &vocab, ProbBackoff *unigrams, std::vector<Middle> &middle, Activate activate, Store &store, PositiveProbWarn &warn) {
+// Continue marking lower entries even when they know that they extend left. This is used for upper/lower bounds.
+template <class Build> void MarkLower(
+ const std::vector<uint64_t> &keys,
+ const Build &build,
+ typename Build::Value::Weights &unigram,
+ std::vector<util::ProbingHashTable<typename Build::Value::ProbingEntry, util::IdentityHash> > &middle,
+ int start_order,
+ const typename Build::Value::Weights &longer) {
+ if (start_order == 0) return;
+ typename util::ProbingHashTable<typename Build::Value::ProbingEntry, util::IdentityHash>::MutableIterator iter;
+ // Hopefully the compiler will realize that if MarkExtends always returns false, it can simplify this code.
+ for (int even_lower = start_order - 2 /* index in middle */; ; --even_lower) {
+ if (even_lower == -1) {
+ build.MarkExtends(unigram, longer);
+ return;
+ }
+ middle[even_lower].UnsafeMutableFind(keys[even_lower], iter);
+ if (!build.MarkExtends(iter->value, longer)) return;
+ }
+}
+
+template <class Build, class Activate, class Store> void ReadNGrams(
+ util::FilePiece &f,
+ const unsigned int n,
+ const size_t count,
+ const ProbingVocabulary &vocab,
+ const Build &build,
+ typename Build::Value::Weights *unigrams,
+ std::vector<util::ProbingHashTable<typename Build::Value::ProbingEntry, util::IdentityHash> > &middle,
+ Activate activate,
+ Store &store,
+ PositiveProbWarn &warn) {
+ typedef typename Build::Value Value;
+ typedef util::ProbingHashTable<typename Value::ProbingEntry, util::IdentityHash> Middle;
assert(n >= 2);
ReadNGramHeader(f, n);
@@ -91,38 +176,25 @@ template <class Voc, class Store, class Middle, class Activate> void ReadNGrams(
// vocab ids of words in reverse order.
std::vector<WordIndex> vocab_ids(n);
std::vector<uint64_t> keys(n-1);
- typename Store::Entry::Value value;
- typename Middle::MutableIterator found;
+ typename Store::Entry entry;
+ std::vector<typename Value::Weights *> between;
for (size_t i = 0; i < count; ++i) {
- ReadNGram(f, n, vocab, &*vocab_ids.begin(), value, warn);
+ ReadNGram(f, n, vocab, &*vocab_ids.begin(), entry.value, warn);
+ build.SetRest(&*vocab_ids.begin(), n, entry.value);
keys[0] = detail::CombineWordHash(static_cast<uint64_t>(vocab_ids.front()), vocab_ids[1]);
for (unsigned int h = 1; h < n - 1; ++h) {
keys[h] = detail::CombineWordHash(keys[h-1], vocab_ids[h+1]);
}
// Initially the sign bit is on, indicating it does not extend left. Most already have this but there might +0.0.
- util::SetSign(value.prob);
- store.Insert(Store::Entry::Make(keys[n-2], value));
- // Go back and find the longest right-aligned entry, informing it that it extends left. Normally this will match immediately, but sometimes SRI is dumb.
- int lower;
- util::FloatEnc fix_prob;
- for (lower = n - 3; ; --lower) {
- if (lower == -1) {
- fix_prob.f = unigrams[vocab_ids.front()].prob;
- fix_prob.i &= ~util::kSignBit;
- unigrams[vocab_ids.front()].prob = fix_prob.f;
- break;
- }
- if (middle[lower].UnsafeMutableFind(keys[lower], found)) {
- // Turn off sign bit to indicate that it extends left.
- fix_prob.f = found->value.prob;
- fix_prob.i &= ~util::kSignBit;
- found->value.prob = fix_prob.f;
- // We don't need to recurse further down because this entry already set the bits for lower entries.
- break;
- }
- }
- if (lower != static_cast<int>(n) - 3) FixSRI(lower, fix_prob.f, n, &*keys.begin(), &*vocab_ids.begin(), unigrams, middle);
+ util::SetSign(entry.value.prob);
+ entry.key = keys[n-2];
+
+ store.Insert(entry);
+ between.clear();
+ FindLower<Value>(keys, unigrams[vocab_ids.front()], middle, between);
+ AdjustLower<typename Store::Entry::Value, Build>(entry.value, build, between, n, vocab_ids, unigrams, middle);
+ if (Build::kMarkEvenLower) MarkLower<Build>(keys, build, unigrams[vocab_ids.front()], middle, n - between.size() - 1, *between.back());
activate(&*vocab_ids.begin(), n);
}
@@ -132,9 +204,9 @@ template <class Voc, class Store, class Middle, class Activate> void ReadNGrams(
} // namespace
namespace detail {
-template <class MiddleT, class LongestT> uint8_t *TemplateHashedSearch<MiddleT, LongestT>::SetupMemory(uint8_t *start, const std::vector<uint64_t> &counts, const Config &config) {
+template <class Value> uint8_t *HashedSearch<Value>::SetupMemory(uint8_t *start, const std::vector<uint64_t> &counts, const Config &config) {
std::size_t allocated = Unigram::Size(counts[0]);
- unigram = Unigram(start, allocated);
+ unigram_ = Unigram(start, counts[0], allocated);
start += allocated;
for (unsigned int n = 2; n < counts.size(); ++n) {
allocated = Middle::Size(counts[n - 1], config.probing_multiplier);
@@ -142,31 +214,63 @@ template <class MiddleT, class LongestT> uint8_t *TemplateHashedSearch<MiddleT,
start += allocated;
}
allocated = Longest::Size(counts.back(), config.probing_multiplier);
- longest = Longest(start, allocated);
+ longest_ = Longest(start, allocated);
start += allocated;
return start;
}
-template <class MiddleT, class LongestT> template <class Voc> void TemplateHashedSearch<MiddleT, LongestT>::InitializeFromARPA(const char * /*file*/, util::FilePiece &f, const std::vector<uint64_t> &counts, const Config &config, Voc &vocab, Backing &backing) {
+template <class Value> void HashedSearch<Value>::InitializeFromARPA(const char * /*file*/, util::FilePiece &f, const std::vector<uint64_t> &counts, const Config &config, ProbingVocabulary &vocab, Backing &backing) {
// TODO: fix sorted.
SetupMemory(GrowForSearch(config, vocab.UnkCountChangePadding(), Size(counts, config), backing), counts, config);
PositiveProbWarn warn(config.positive_log_probability);
-
- Read1Grams(f, counts[0], vocab, unigram.Raw(), warn);
+ Read1Grams(f, counts[0], vocab, unigram_.Raw(), warn);
CheckSpecials(config, vocab);
+ DispatchBuild(f, counts, config, vocab, warn);
+}
+
+template <> void HashedSearch<BackoffValue>::DispatchBuild(util::FilePiece &f, const std::vector<uint64_t> &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn) {
+ NoRestBuild build;
+ ApplyBuild(f, counts, config, vocab, warn, build);
+}
+
+template <> void HashedSearch<RestValue>::DispatchBuild(util::FilePiece &f, const std::vector<uint64_t> &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn) {
+ switch (config.rest_function) {
+ case Config::REST_MAX:
+ {
+ MaxRestBuild build;
+ ApplyBuild(f, counts, config, vocab, warn, build);
+ }
+ break;
+ case Config::REST_LOWER:
+ {
+ LowerRestBuild<ProbingModel> build(config, counts.size(), vocab);
+ ApplyBuild(f, counts, config, vocab, warn, build);
+ }
+ break;
+ }
+}
+
+template <class Value> template <class Build> void HashedSearch<Value>::ApplyBuild(util::FilePiece &f, const std::vector<uint64_t> &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn, const Build &build) {
+ for (WordIndex i = 0; i < counts[0]; ++i) {
+ build.SetRest(&i, (unsigned int)1, unigram_.Raw()[i]);
+ }
try {
if (counts.size() > 2) {
- ReadNGrams(f, 2, counts[1], vocab, unigram.Raw(), middle_, ActivateUnigram(unigram.Raw()), middle_[0], warn);
+ ReadNGrams<Build, ActivateUnigram<typename Value::Weights>, Middle>(
+ f, 2, counts[1], vocab, build, unigram_.Raw(), middle_, ActivateUnigram<typename Value::Weights>(unigram_.Raw()), middle_[0], warn);
}
for (unsigned int n = 3; n < counts.size(); ++n) {
- ReadNGrams(f, n, counts[n-1], vocab, unigram.Raw(), middle_, ActivateLowerMiddle<Middle>(middle_[n-3]), middle_[n-2], warn);
+ ReadNGrams<Build, ActivateLowerMiddle<Middle>, Middle>(
+ f, n, counts[n-1], vocab, build, unigram_.Raw(), middle_, ActivateLowerMiddle<Middle>(middle_[n-3]), middle_[n-2], warn);
}
if (counts.size() > 2) {
- ReadNGrams(f, counts.size(), counts[counts.size() - 1], vocab, unigram.Raw(), middle_, ActivateLowerMiddle<Middle>(middle_.back()), longest, warn);
+ ReadNGrams<Build, ActivateLowerMiddle<Middle>, Longest>(
+ f, counts.size(), counts[counts.size() - 1], vocab, build, unigram_.Raw(), middle_, ActivateLowerMiddle<Middle>(middle_.back()), longest_, warn);
} else {
- ReadNGrams(f, counts.size(), counts[counts.size() - 1], vocab, unigram.Raw(), middle_, ActivateUnigram(unigram.Raw()), longest, warn);
+ ReadNGrams<Build, ActivateUnigram<typename Value::Weights>, Longest>(
+ f, counts.size(), counts[counts.size() - 1], vocab, build, unigram_.Raw(), middle_, ActivateUnigram<typename Value::Weights>(unigram_.Raw()), longest_, warn);
}
} catch (util::ProbingSizeException &e) {
UTIL_THROW(util::ProbingSizeException, "Avoid pruning n-grams like \"bar baz quux\" when \"foo bar baz quux\" is still in the model. KenLM will work when this pruning happens, but the probing model assumes these events are rare enough that using blank space in the probing hash table will cover all of them. Increase probing_multiplier (-p to build_binary) to add more blank spaces.\n");
@@ -174,17 +278,16 @@ template <class MiddleT, class LongestT> template <class Voc> void TemplateHashe
ReadEnd(f);
}
-template <class MiddleT, class LongestT> void TemplateHashedSearch<MiddleT, LongestT>::LoadedBinary() {
- unigram.LoadedBinary();
+template <class Value> void HashedSearch<Value>::LoadedBinary() {
+ unigram_.LoadedBinary();
for (typename std::vector<Middle>::iterator i = middle_.begin(); i != middle_.end(); ++i) {
i->LoadedBinary();
}
- longest.LoadedBinary();
+ longest_.LoadedBinary();
}
-template class TemplateHashedSearch<ProbingHashedSearch::Middle, ProbingHashedSearch::Longest>;
-
-template void TemplateHashedSearch<ProbingHashedSearch::Middle, ProbingHashedSearch::Longest>::InitializeFromARPA(const char *, util::FilePiece &f, const std::vector<uint64_t> &counts, const Config &, ProbingVocabulary &vocab, Backing &backing);
+template class HashedSearch<BackoffValue>;
+template class HashedSearch<RestValue>;
} // namespace detail
} // namespace ngram