author     Wu, Ke <wuke@cs.umd.edu>    2014-10-07 17:22:11 -0400
committer  Wu, Ke <wuke@cs.umd.edu>    2014-10-07 17:22:11 -0400
commit     00968d1ba03c0603440fe5a765b87869b99a0a93 (patch)
tree       f264c07d9851b47b117839fc9345d7e1d5e880a2  /utils/synutils/maxent-3.0/lbfgs.cpp
parent     f762dbbf10a8204d0d0b82e9acb29feacd3b3bb4 (diff)
Apply clang-format
Diffstat (limited to 'utils/synutils/maxent-3.0/lbfgs.cpp')
-rw-r--r--  utils/synutils/maxent-3.0/lbfgs.cpp  48
1 file changed, 23 insertions, 25 deletions
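
Note: the commit does not record the style settings the tool was run with. Judging from the two-space indents and brace placement in the hunks below, an in-place pass along the lines of

    clang-format -i -style=Google utils/synutils/maxent-3.0/lbfgs.cpp

would reproduce this kind of reformatting; whether a -style flag or a project-level .clang-format file was actually used is not recoverable from this page.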
diff --git a/utils/synutils/maxent-3.0/lbfgs.cpp b/utils/synutils/maxent-3.0/lbfgs.cpp
index 9eb04bef..bd26f048 100644
--- a/utils/synutils/maxent-3.0/lbfgs.cpp
+++ b/utils/synutils/maxent-3.0/lbfgs.cpp
@@ -8,20 +8,17 @@
using namespace std;
-const static int M = LBFGS_M;
+const static int M = LBFGS_M;
const static double LINE_SEARCH_ALPHA = 0.1;
-const static double LINE_SEARCH_BETA = 0.5;
+const static double LINE_SEARCH_BETA = 0.5;
// stopping criteria
-int LBFGS_MAX_ITER = 300;
+int LBFGS_MAX_ITER = 300;
const static double MIN_GRAD_NORM = 0.0001;
-
-double
-ME_Model::backtracking_line_search(
- const Vec & x0, const Vec & grad0, const double f0,
- const Vec & dx, Vec & x, Vec & grad1)
-{
+double ME_Model::backtracking_line_search(const Vec& x0, const Vec& grad0,
+ const double f0, const Vec& dx,
+ Vec& x, Vec& grad1) {
double t = 1.0 / LINE_SEARCH_BETA;
double f;
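
Note: only the reformatted signature of backtracking_line_search is touched here; its body is unchanged and therefore not shown in the diff. For orientation, an Armijo backtracking search driven by constants in the roles of LINE_SEARCH_ALPHA and LINE_SEARCH_BETA above typically looks like the self-contained toy below (the quadratic objective and all variable names are made up for illustration; this is not the body of the function in this file):

#include <cstdio>

int main() {
  const double ALPHA = 0.1, BETA = 0.5;  // same roles as LINE_SEARCH_ALPHA / LINE_SEARCH_BETA
  // Made-up 1-D objective f(x) = (x - 3)^2 and its derivative.
  auto f = [](double x) { return (x - 3.0) * (x - 3.0); };
  auto g = [](double x) { return 2.0 * (x - 3.0); };

  double x = 0.0;
  double f0 = f(x), g0 = g(x);
  double dx = -g0;        // descent direction (here: steepest descent)
  double t = 1.0 / BETA;  // so the first trial step below is t = 1, as in the code above
  double fx;
  do {
    t *= BETA;  // shrink the step until the Armijo sufficient-decrease test passes
    fx = f(x + t * dx);
  } while (fx > f0 + ALPHA * t * g0 * dx);
  printf("accepted step t = %g, new x = %g, f = %g\n", t, x + t * dx, fx);
  return 0;
}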
@@ -39,20 +36,23 @@ ME_Model::backtracking_line_search(
// Jorge Nocedal, "Updating Quasi-Newton Matrices With Limited Storage",
// Mathematics of Computation, Vol. 35, No. 151, pp. 773-782, 1980.
//
-Vec
-approximate_Hg(const int iter, const Vec & grad,
- const Vec s[], const Vec y[], const double z[])
-{
+Vec approximate_Hg(const int iter, const Vec& grad, const Vec s[],
+ const Vec y[], const double z[]) {
int offset, bound;
- if (iter <= M) { offset = 0; bound = iter; }
- else { offset = iter - M; bound = M; }
+ if (iter <= M) {
+ offset = 0;
+ bound = iter;
+ } else {
+ offset = iter - M;
+ bound = M;
+ }
Vec q = grad;
double alpha[M], beta[M];
for (int i = bound - 1; i >= 0; i--) {
const int j = (i + offset) % M;
- alpha[i] = z[j] * dot_product(s[j], q);
- q += -alpha[i] * y[j];
+ alpha[i] = z[j] * dot_product(s[j], q);
+ q += -alpha[i] * y[j];
}
if (iter > 0) {
const int j = (iter - 1) % M;
@@ -63,16 +63,14 @@ approximate_Hg(const int iter, const Vec & grad,
}
for (int i = 0; i <= bound - 1; i++) {
const int j = (i + offset) % M;
- beta[i] = z[j] * dot_product(y[j], q);
- q += s[j] * (alpha[i] - beta[i]);
+ beta[i] = z[j] * dot_product(y[j], q);
+ q += s[j] * (alpha[i] - beta[i]);
}
return q;
}
-vector<double>
-ME_Model::perform_LBFGS(const vector<double> & x0)
-{
+vector<double> ME_Model::perform_LBFGS(const vector<double>& x0) {
const size_t dim = x0.size();
Vec x = x0;
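
Note on the hunks above: approximate_Hg is the standard L-BFGS two-loop recursion from the cited Nocedal paper, with s[] and y[] holding the last M correction pairs and z[] evidently playing the role of rho_k = 1 / (y_k . s_k). The initial Hessian scaling sits between the two hunks and is not shown. A self-contained sketch over plain std::vector (the names and the gamma = (s.y)/(y.y) scaling are the textbook choices, not copied from this file):

#include <cstddef>
#include <vector>

using Vecd = std::vector<double>;

double dot(const Vecd& a, const Vecd& b) {
  double r = 0.0;
  for (std::size_t i = 0; i < a.size(); ++i) r += a[i] * b[i];
  return r;
}

// Two-loop recursion: computes H * grad from the last `bound` correction
// pairs without ever forming the inverse-Hessian approximation H.
// Ring-buffer indexing mirrors the (i + offset) % M reads in approximate_Hg.
Vecd two_loop(int iter, const Vecd& grad, const std::vector<Vecd>& s,
              const std::vector<Vecd>& y, const std::vector<double>& rho,
              int m) {
  const int offset = (iter <= m) ? 0 : iter - m;
  const int bound = (iter <= m) ? iter : m;
  Vecd q = grad;
  std::vector<double> alpha(bound);
  for (int i = bound - 1; i >= 0; --i) {  // first loop: newest pair to oldest
    const int j = (i + offset) % m;
    alpha[i] = rho[j] * dot(s[j], q);
    for (std::size_t k = 0; k < q.size(); ++k) q[k] -= alpha[i] * y[j][k];
  }
  if (iter > 0) {  // scale by an initial H_0 = gamma * I, gamma = (s.y)/(y.y)
    const int j = (iter - 1) % m;
    const double gamma = dot(s[j], y[j]) / dot(y[j], y[j]);
    for (std::size_t k = 0; k < q.size(); ++k) q[k] *= gamma;
  }
  for (int i = 0; i < bound; ++i) {  // second loop: oldest pair to newest
    const int j = (i + offset) % m;
    const double beta = rho[j] * dot(y[j], q);
    for (std::size_t k = 0; k < q.size(); ++k) q[k] += s[j][k] * (alpha[i] - beta);
  }
  return q;
}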
@@ -84,10 +82,11 @@ ME_Model::perform_LBFGS(const vector<double> & x0)
for (int iter = 0; iter < LBFGS_MAX_ITER; iter++) {
- fprintf(stderr, "%3d obj(err) = %f (%6.4f)", iter+1, -f, _train_error);
+ fprintf(stderr, "%3d obj(err) = %f (%6.4f)", iter + 1, -f, _train_error);
if (_nheldout > 0) {
const double heldout_logl = heldout_likelihood();
- fprintf(stderr, " heldout_logl(err) = %f (%6.4f)", heldout_logl, _heldout_error);
+ fprintf(stderr, " heldout_logl(err) = %f (%6.4f)", heldout_logl,
+ _heldout_error);
}
fprintf(stderr, "\n");
@@ -107,4 +106,3 @@ ME_Model::perform_LBFGS(const vector<double> & x0)
return x.STLVec();
}
-
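
Note: the body of perform_LBFGS lies almost entirely outside the hunks above; only the per-iteration logging and the final return are touched. In the usual L-BFGS driver, and consistently with how approximate_Hg reads s[], y[], and z[], each iteration evaluates the objective and gradient, gets a search direction from approximate_Hg, runs backtracking_line_search, and then stores one new correction pair in ring buffers of size M. A toy of just that bookkeeping step (all numbers and names below are made up; x0/g0 stand for the previous point and gradient, x1/g1 for the ones returned by the line search):

#include <cstddef>
#include <cstdio>
#include <vector>

using Vecd = std::vector<double>;

double dot(const Vecd& a, const Vecd& b) {
  double r = 0.0;
  for (std::size_t i = 0; i < a.size(); ++i) r += a[i] * b[i];
  return r;
}

int main() {
  const int M = 10;    // same role as LBFGS_M above
  const int iter = 0;  // current outer iteration
  Vecd x0 = {0.0, 0.0}, g0 = {-2.0, 4.0};   // previous iterate and gradient
  Vecd x1 = {0.2, -0.4}, g1 = {-1.6, 3.2};  // iterate/gradient after the line search

  // One correction pair per iteration:
  //   s_k = x_{k+1} - x_k,  y_k = g_{k+1} - g_k,  z_k = 1 / (y_k . s_k)
  Vecd s(x0.size()), y(x0.size());
  for (std::size_t i = 0; i < s.size(); ++i) {
    s[i] = x1[i] - x0[i];
    y[i] = g1[i] - g0[i];
  }
  const double z = 1.0 / dot(y, s);
  const int slot = iter % M;  // ring-buffer slot, matching the (i + offset) % M reads

  printf("slot %d: s = (%g, %g), y = (%g, %g), z = %g\n", slot, s[0], s[1],
         y[0], y[1], z);
  return 0;
}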