Diffstat (limited to 'klm/lm/quantize.cc')
-rw-r--r--  klm/lm/quantize.cc | 38
1 files changed, 19 insertions, 19 deletions
diff --git a/klm/lm/quantize.cc b/klm/lm/quantize.cc
index 98a5d048..a8e0cb21 100644
--- a/klm/lm/quantize.cc
+++ b/klm/lm/quantize.cc
@@ -1,31 +1,30 @@
+/* Quantize into bins of equal size as described in
+ * M. Federico and N. Bertoldi. 2006. How many bits are needed
+ * to store probabilities for phrase-based translation? In Proc.
+ * of the Workshop on Statistical Machine Translation, pages
+ * 94–101, New York City, June. Association for Computa-
+ * tional Linguistics.
+ */
+
#include "lm/quantize.hh"
#include "lm/binary_format.hh"
#include "lm/lm_exception.hh"
+#include "util/file.hh"
#include <algorithm>
#include <numeric>
-#include <unistd.h>
-
namespace lm {
namespace ngram {
-/* Quantize into bins of equal size as described in
- * M. Federico and N. Bertoldi. 2006. How many bits are needed
- * to store probabilities for phrase-based translation? In Proc.
- * of the Workshop on Statistical Machine Translation, pages
- * 94–101, New York City, June. Association for Computa-
- * tional Linguistics.
- */
-
namespace {
-void MakeBins(float *values, float *values_end, float *centers, uint32_t bins) {
- std::sort(values, values_end);
- const float *start = values, *finish;
+void MakeBins(std::vector<float> &values, float *centers, uint32_t bins) {
+ std::sort(values.begin(), values.end());
+ std::vector<float>::const_iterator start = values.begin(), finish;
for (uint32_t i = 0; i < bins; ++i, ++centers, start = finish) {
- finish = values + (((values_end - values) * static_cast<uint64_t>(i + 1)) / bins);
+ finish = values.begin() + ((values.size() * static_cast<uint64_t>(i + 1)) / bins);
if (finish == start) {
// zero length bucket.
*centers = i ? *(centers - 1) : -std::numeric_limits<float>::infinity();
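For context, the binning routine after this change amounts to the self-contained sketch below. The hunk above truncates the non-empty-bucket branch, so computing each center as the mean of its bucket (via std::accumulate, consistent with the <numeric> include) is an assumption here rather than a quote of the library code.

#include <algorithm>
#include <cstdint>
#include <limits>
#include <numeric>
#include <vector>

// Equal-size binning sketch: sort the values, split them into `bins` buckets
// of (nearly) equal count, and emit one representative center per bucket.
void MakeBinsSketch(std::vector<float> &values, float *centers, uint32_t bins) {
  std::sort(values.begin(), values.end());
  std::vector<float>::const_iterator start = values.begin(), finish;
  for (uint32_t i = 0; i < bins; ++i, ++centers, start = finish) {
    finish = values.begin() + ((values.size() * static_cast<uint64_t>(i + 1)) / bins);
    if (finish == start) {
      // Zero-length bucket: fall back to the previous center (or -inf for the first).
      *centers = i ? *(centers - 1) : -std::numeric_limits<float>::infinity();
    } else {
      // Assumed: the center is the mean of the bucket's values.
      *centers = std::accumulate(start, finish, 0.0f) / static_cast<float>(finish - start);
    }
  }
}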
@@ -41,10 +40,11 @@ const char kSeparatelyQuantizeVersion = 2;
void SeparatelyQuantize::UpdateConfigFromBinary(int fd, const std::vector<uint64_t> &/*counts*/, Config &config) {
char version;
- if (read(fd, &version, 1) != 1 || read(fd, &config.prob_bits, 1) != 1 || read(fd, &config.backoff_bits, 1) != 1)
- UTIL_THROW(util::ErrnoException, "Failed to read header for quantization.");
+ util::ReadOrThrow(fd, &version, 1);
+ util::ReadOrThrow(fd, &config.prob_bits, 1);
+ util::ReadOrThrow(fd, &config.backoff_bits, 1);
if (version != kSeparatelyQuantizeVersion) UTIL_THROW(FormatLoadException, "This file has quantization version " << (unsigned)version << " but the code expects version " << (unsigned)kSeparatelyQuantizeVersion);
- AdvanceOrThrow(fd, -3);
+ util::AdvanceOrThrow(fd, -3);
}
void SeparatelyQuantize::SetupMemory(void *start, const Config &config) {
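The raw read(2) calls and their errno check are delegated to helpers from the newly included util/file.hh. Their definitions are not part of this diff, so the following is only a sketch of what such helpers plausibly do, assuming signatures along the lines of ReadOrThrow(fd, buf, n) and AdvanceOrThrow(fd, offset):

#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <unistd.h>

// Read exactly `amount` bytes or throw; read() may return short counts.
void ReadOrThrowSketch(int fd, void *to, std::size_t amount) {
  uint8_t *out = static_cast<uint8_t*>(to);
  while (amount) {
    ssize_t got = read(fd, out, amount);
    if (got <= 0) throw std::runtime_error("Failed to read header for quantization.");
    out += got;
    amount -= static_cast<std::size_t>(got);
  }
}

// Seek relative to the current offset or throw; the caller above passes -3 to
// rewind the three header bytes it just consumed.
void AdvanceOrThrowSketch(int fd, int64_t off) {
  if (lseek(fd, off, SEEK_CUR) == static_cast<off_t>(-1))
    throw std::runtime_error("Failed to seek in binary language model file.");
}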
@@ -66,12 +66,12 @@ void SeparatelyQuantize::Train(uint8_t order, std::vector<float> &prob, std::vec
float *centers = start_ + TableStart(order) + ProbTableLength();
*(centers++) = kNoExtensionBackoff;
*(centers++) = kExtensionBackoff;
- MakeBins(&*backoff.begin(), &*backoff.end(), centers, (1ULL << backoff_bits_) - 2);
+ MakeBins(backoff, centers, (1ULL << backoff_bits_) - 2);
}
void SeparatelyQuantize::TrainProb(uint8_t order, std::vector<float> &prob) {
float *centers = start_ + TableStart(order);
- MakeBins(&*prob.begin(), &*prob.end(), centers, (1ULL << prob_bits_));
+ MakeBins(prob, centers, (1ULL << prob_bits_));
}
void SeparatelyQuantize::FinishedLoading(const Config &config) {
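A note on the bin counts above: Train() writes the two reserved backoff centers (kNoExtensionBackoff, kExtensionBackoff) before calling MakeBins, so only (1ULL << backoff_bits_) - 2 centers are trained from data, while TrainProb() trains the full (1ULL << prob_bits_). A trained centers table is sorted, so a value can be encoded by nearest-center search; the library's own encode path is outside this diff, and the helper below is a hypothetical illustration only.

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical nearest-center encode against a sorted centers table.
uint64_t NearestCenter(const std::vector<float> &centers, float value) {
  std::vector<float>::const_iterator above =
      std::lower_bound(centers.begin(), centers.end(), value);
  if (above == centers.begin()) return 0;
  if (above == centers.end()) return centers.size() - 1;
  // Choose whichever neighboring center is closer to the query value.
  return (value - *(above - 1) <= *above - value)
             ? static_cast<uint64_t>(above - centers.begin() - 1)
             : static_cast<uint64_t>(above - centers.begin());
}

int main() {
  const uint8_t backoff_bits = 8;                                    // example width, not from the diff
  const uint64_t trained_backoff_bins = (1ULL << backoff_bits) - 2;  // 254 trained + 2 reserved codes
  std::vector<float> centers = {-3.0f, -1.5f, -0.25f, 0.0f};         // toy table, already sorted
  uint64_t code = NearestCenter(centers, -1.0f);                     // picks index 1 (-1.5 is closest)
  return (code == 1 && trained_backoff_bins == 254) ? 0 : 1;
}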