From 10a232656a0c882b3b955d2bcfac138ce11e8a2e Mon Sep 17 00:00:00 2001 From: Patrick Simianer Date: Tue, 13 Mar 2012 09:15:46 +0100 Subject: polish --- dtrain/README.md | 437 ++---------------------- dtrain/dtrain.cc | 18 +- dtrain/dtrain.h | 2 +- dtrain/hstreaming/avg.rb | 2 +- dtrain/hstreaming/cdec.ini | 2 +- dtrain/hstreaming/hadoop-streaming-job.sh | 23 +- dtrain/hstreaming/rule_count/red.rb | 2 + dtrain/hstreaming/rule_count/rulecount.rb | 2 + dtrain/pairsampling.h | 2 +- dtrain/score.cc | 4 +- dtrain/test/example/cdec.ini | 8 +- dtrain/test/example/dtrain.ini | 28 +- dtrain/test/logreg_cd/bin_class.cc | 4 - dtrain/test/logreg_cd/bin_class.h | 22 -- dtrain/test/logreg_cd/log_reg.cc | 39 --- dtrain/test/logreg_cd/log_reg.h | 14 - dtrain/test/mira_update/Hildreth.cpp | 187 ---------- dtrain/test/mira_update/Hildreth.h | 10 - dtrain/test/mira_update/dtrain.cc | 532 ----------------------------- dtrain/test/mira_update/sample.h | 101 ------ dtrain/test/mtm11/logreg_cd/bin_class.cc | 4 + dtrain/test/mtm11/logreg_cd/bin_class.h | 22 ++ dtrain/test/mtm11/logreg_cd/log_reg.cc | 39 +++ dtrain/test/mtm11/logreg_cd/log_reg.h | 14 + dtrain/test/mtm11/mira_update/Hildreth.cpp | 187 ++++++++++ dtrain/test/mtm11/mira_update/Hildreth.h | 10 + dtrain/test/mtm11/mira_update/dtrain.cc | 532 +++++++++++++++++++++++++++++ dtrain/test/mtm11/mira_update/sample.h | 101 ++++++ dtrain/test/test.in | 3 - dtrain/test/toy/dtrain.ini | 11 +- dtrain/test/toy/in | 2 - dtrain/test/toy/input | 2 + 32 files changed, 1002 insertions(+), 1364 deletions(-) delete mode 100644 dtrain/test/logreg_cd/bin_class.cc delete mode 100644 dtrain/test/logreg_cd/bin_class.h delete mode 100644 dtrain/test/logreg_cd/log_reg.cc delete mode 100644 dtrain/test/logreg_cd/log_reg.h delete mode 100644 dtrain/test/mira_update/Hildreth.cpp delete mode 100644 dtrain/test/mira_update/Hildreth.h delete mode 100644 dtrain/test/mira_update/dtrain.cc delete mode 100644 dtrain/test/mira_update/sample.h create mode 100644 dtrain/test/mtm11/logreg_cd/bin_class.cc create mode 100644 dtrain/test/mtm11/logreg_cd/bin_class.h create mode 100644 dtrain/test/mtm11/logreg_cd/log_reg.cc create mode 100644 dtrain/test/mtm11/logreg_cd/log_reg.h create mode 100644 dtrain/test/mtm11/mira_update/Hildreth.cpp create mode 100644 dtrain/test/mtm11/mira_update/Hildreth.h create mode 100644 dtrain/test/mtm11/mira_update/dtrain.cc create mode 100644 dtrain/test/mtm11/mira_update/sample.h delete mode 100644 dtrain/test/test.in delete mode 100644 dtrain/test/toy/in create mode 100644 dtrain/test/toy/input diff --git a/dtrain/README.md b/dtrain/README.md index 91cf0704..c39d94d2 100644 --- a/dtrain/README.md +++ b/dtrain/README.md @@ -1,409 +1,40 @@ -dtrain -====== +This is a really fast (parallelizable) tuning method for cdec as used here: + "Joint Feature Selection in Distributed Stochastic + Learning for Large-Scale Discriminative Training in + SMT" Simianer, Riezler, Dyer + ACL 2012 -Build & run ------------ -build .. -
-git clone git://github.com/qlt/cdec-dtrain.git
-cd cdec-dtrain
-autoreconf -if[v]
-./configure [--disable-gtest]
-make
-
-
-and run:
-
-cd dtrain/hstreaming/
-(edit ini files)
-edit the vars in hadoop-streaming-job.sh ($ID, $IN and $OUT)
-./hadoop-streaming-job.sh
-
-
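dtrain's hstreaming mode reads one tab-separated line per input sentence: source, reference, then the rules of the per-sentence grammar, so the "\n" between rules becomes "\t" (cf. the `// input: src\tref\tpsg` comment in dtrain.cc and the Running notes below). A minimal Ruby sketch of that conversion, assuming hypothetical file names (`in.src`, `in.refs`, one grammar file per sentence under `psg/`) -- check the exact field order against your dtrain build:

    #!/usr/bin/env ruby
    # sketch: emit dtrain hstreaming input, one tab-separated line
    # per sentence: source \t reference \t rule \t rule \t ...
    srcs = File.readlines('in.src').map { |l| l.chomp }
    refs = File.readlines('in.refs').map { |l| l.chomp }
    srcs.each_with_index do |src, i|
      rules = File.readlines("psg/grammar.#{i}").map { |l| l.chomp }
      abort 'tab in data!' if (src + refs[i]).include? "\t" # tabs would break the format
      puts(([src, refs[i]] + rules).join("\t"))
    end

The resulting file (split into shards) is what $IN in hadoop-streaming-job.sh would then point at on HDFS.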
- -Ideas ------ -* *MULTIPARTITE* ranking (1 vs rest, cluster model/score) -* *REMEMBER* sampled translations (merge kbest lists) -* *SELECT* iteration with highest _real_ BLEU on devtest? -* *SYNTHETIC* data? (perfect translation always in kbest) -* *CACHE* ngrams for scoring -* hadoop *PIPES* implementation -* *ITERATION* variants (shuffle resulting weights, re-iterate) -* *MORE THAN ONE* reference for BLEU, paraphrases? -* *RANDOM RESTARTS* or random directions -* use separate *TEST SET* for each shard -* *REDUCE* training set (50k?) -* *SYNTAX* features (CD) -* distribute *DEV* set to all nodes, avg -Notes -------------------------------- -* cdec kbest vs 1best (no -k param), rescoring (ref?)? => ok(?) -* no sparse vector in decoder => fixed/'ok' -* PhraseModel features 0..99, mapping? -* flex scanner jams on bad input, we could skip that -* input/grammar caching (vector -> vector) -* why loo grammars larger? are they? (sort psgs | uniq -> grammar) -* lower beam size to be faster? -* why is -100 in lm so good? -* noise helps for discriminative training? -* what does srilm do with -unk but nothing mapped to unk ( unigram)? - => this: http://www-speech.sri.com/pipermail/srilm-user/2007q4/000543.html -* does AER correlate with BLEU? paper? -* learning rate tuned with perceptron? -* dtrain (perceptron) used for some tests because no optimizer instability -* http://www.ark.cs.cmu.edu/cdyer/dtrain/ -* repeat as often as max needed by any learner! -* don't compare lms (perplex.) with diff vocab (see stupid backoff paper) -* what does mira/pro optimize exactly? -* early stopping (epsilon, no change in kbest list) -* 10-20k rules per sent are normal -* giza vs. berkeleyaligner: giza more/less noise? -* compound splitting -> more rules? -* loo (jackknifing) => ref can't be reached? -* prune singletons -> less noise? (do I do this?) -* random sample: take fixed X at random -* scale of features/weights? - -Features +Building -------- -* baseline features (take whatever cdec implements for VEST) -* rule identifiers (feature name = rule as string) -* rule discounts (taken from frequency i or frequency interval [i,j] of rule in extraction from parallel training data) bins - => from PRO -* target ngrams (from nonterminals in rule rhs), with gaps? -* source-target unigrams (from word alignments used in rule extraction, if they are?) -* lhs, rhs, rule length features -* all other features depend on syntax annotation. -* word alignment - -Todo ------------ -* merge dtrain part-X files, for better blocks (how to do this with 4.5tb ep) -* mapred count shard sents -* mapred stats for learning curve (output weights per iter for eval on devtest) -* 250 forest sampling is real bad, bug? -* metric reporter of bleu for each shard (reporters, status?) - to draw learning curves for all shards in 1 plot -* kenlm not portable (i7-2620M vs Intel(R) Xeon(R) CPU E5620 @ 2.40GHz) -* mapred chaining? hamake? -* make our sigtest work with cdec -* l1l2 red (tsuroke)? -* epsilon stopping criterion -* normalize weight vector to get proper model scores for forest sampling -* 108010 with gap(s), and/or fix (same score in diff groups) -* 108010: combine model score + bleu -* visualize weight vector -* *100 runs stats -* correlation of *_bleu to ibm_bleu -* ep: open lm, cutoff @1 -* tune regs -* 3x3 4x4 5x5 .. 10x10 until standard dev ok, moving avg -* avg weight vector for dtrain? (mira non-avg) -* repeat lm choose with mira/pro -* shuffle training data -* learning rate dynamic (Duh? Tsuroka?) -* divide updates by ? 
-* mira: 5/10/15, pro: (5)/10/20/30 (on devtest!)
-* sample pairs like in pro
-* mira forest sampling
-* platform specific (108010!)
-
-Data
-----
-
-nc-v6.de-en             apegd
-nc-v6.de-en.loo         apegd
-nc-v6.de-en.giza        apegd
-nc-v6.de-en.giza.loo    apegd
-nc-v6.de-en.cs.giza     apegd
-nc-v6.de-en.cs.giza.loo apegd
-nc-v6.de-en.cs          apegd
-nc-v6.de-en.cs.loo      apegd
---
-ep-v6.de-en.cs          apegd
-ep-v6.de-en.cs.loo      apegd
-
-a: alignment, p: prep, e: extract,
-g: grammar, d: dtrain
-
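The .loo variants above are the leave-one-out ('jackknifing') data sets referred to throughout these notes: when sentence pair i of the training data is decoded, rule occurrences extracted from pair i itself are withheld, so the reference cannot simply be memorized. A toy Ruby sketch of the counting idea (not the actual extraction pipeline):

    # toy leave-one-out ('loo') rule counts: the grammar for sentence i
    # is built from every sentence pair except i itself.
    # rule_counts_per_sent: one {rule => count} hash per sentence,
    # a hypothetical stand-in for the real extraction output.
    def loo_counts(rule_counts_per_sent, i)
      total = Hash.new(0)
      rule_counts_per_sent.each_with_index do |counts, j|
        next if j == i               # leave pair i out
        counts.each { |rule, c| total[rule] += c }
      end
      total
    end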
- -Experiments +builds when building cdec, see ../BUILDING + +Running +------- +To run this on a dev set locally (default): + +#define DTRAIN_LOCAL + +otherwise remove that line or undef. You need a single grammar file +or per-sentence-grammars (psg) as you would use with cdec. +Additionally you need to give dtrain a file with +references (--refs). + +The input for use with hadoop streaming looks like this: + +\t\t\t + +To convert a psg to this format you need to replace all "\n" +by "\t". Make sure there are no tabs in your data. + +For an example of local usage (with 'distributed' format) +the see test/example/ . This expects dtrain to be built without +DTRAIN_LOCAL param. + +Legal stuff ----------- -[grammar stats - oov on dev/devtest/test - size - #rules (uniq) - time for building - ep: 1.5 days on 278 slots (30 nodes) - nc: ~2 hours ^^^ - - lm stats - oov on dev/devtest/test - perplex on train/dev/devtest/test?] - -[0] -which word alignment? - berkeleyaligner - giza++ as of Sep 24 2011, mgizapp 0.6.3 - --symgiza as of Oct 1 2011-- - --- - NON LOO - (symgiza unreliable) - randomly sample 100 from train with loo - run dtrain for 100 iterations - w/o all other feats (lm, wp, ...) +Glue - measure ibm bleu on exact same sents - ep -> berkeleyaligner ??? (mb per sent, rules per sent) - -*100 -> triples, quadruples - -[1] -lm? - 3-4-5 - open - unk - nounk (-100 for unk) - -- - lm oov weight pos? -100 - no tuning, -100 prob for unk EXPECT: nounk - tuning with dtrain EXPECT: open - => - lmtest on cs.giza.loo??? - -[2] -cs? - 'default' weights - -[3] -loo vs non-loo - 'jackknifing' - generalization (determ.!) on dev, test on devtest - -[4] -stability - all with default params - mira: 100 - pro: 100 - vest: 100 - dtrain: 100 - -[undecided] -do we even need loo for ep? -pro metaparam - (max) iter - regularization - ??? - -mira metaparam - (max) iter: 10 (nc???) vs 15 (ep???) - -features to try - NgramFeatures -> target side ngrams - RuleIdentityFeatures - RuleNgramFeatures -> source side ngrams from rule - RuleShape -> relative orientation of X's and terminals - SpanFeatures -> http://www.cs.cmu.edu/~cdyer/wmt11-sysdesc.pdf - ArityPenalty -> Arity=0 Arity=1 and Arity=2 - ---- -shard size: 500-2k -iterations, re-iterate (shuffle w): 10 -gamma, eta -SVM, perceptron -reducer: avg (feats/shard), l1l2, active on all shards -sentence sampling: forest -pair sampling: all, rand, 108010 (sort), PRO -out of domain test? - ---- -variables to control - -[alignment] - -[lm] - -[vest] - -[mira] - -[dtrain] - -[pro] - - --------- -In PRO, a continually growing list of candidates is maintained for -each sentence by concatenating k-best lists from each decoding run, -and the training pairs are sampled from them. This is done to ensure -that the optimizer doesn't forget about bad places in the parameter -space that it visited previously (since some training samples will be -selected from that space). Something like your approach should work -well though, provided you don't overfit to the sentence pair you're -looking at in each iteration. So I guess the question is: what are you -doing in step 2 exactly? A complete optimization? Taking one step? The -other thing is, do you maintain n-best hypotheses from previous -iterations? - --------- -good grammar? => ability to overfit - berkeley vs giza - not LOO - NO optimizer instability - 20+ iterations - approx_bleu-4 - train on dev => test on dev - train on devtest => test on devtest - dev on dev better? - devtest on devtest better? - (train/test on loo? => lower!) 
- (test on others => real bad) - - -loo vs non-loo? => generalization - (cs vs non-cs?) - giza||berkeley - LOO + non LOO - 2 fold cross validation - train on dev, test on devtest - train on devtest, test on dev - as above ^^^ - - - --- - -as PRO - - UPDATES: perceptron - - LEARNING RATE: 0.0005 - - GAMMA: - - - #ITERATIONS: 30 - - SCORER: stupid_bleu@4 - - K: 100, 1500?(top X pairs) - - SAMPLE: kbest uniq, kbest no - - PAIR SAMPLING: all, PRO?TODO - - SELECT: best - - FEATURES: baseline, RuleShape+SpanFeatures - --- - - Note: no weight interpolation - no early stopping based on kbest lists (epsilon?TODO) - -dtrain tune reg - - updates: SVM - - pair sampling important! - - learning_rate= 100 50 10 5 1 0.5 0.1 0.05 0.01 0.005 0.001 0.0005 0.0001 0.00005 0.00001 0.000005 0.000001 0.0000005 0.0000001 0.0000000001 - - - gamma= - - - scorer: stupid_bleu 3 - - test weights: last - - - - - - test: devtest - - ---- -weights visualization (blocks, color coded) -zig zag!? -repeat all basic exps with training set -merge? - - - - ---sample_from ---k ---filter ---pair_sampling ---N ---epochs ---scorer ---learning_rate ---gamma ---select_weights -[--unit_weight_vector] -[--l1_reg] -[--l1_reg_strength] - ---------- -corr best = really best? -108010gaps - -coltrane: 9 -gillespie: 9 -staley: 2 -io: 6 -ioh: 4 - slots - - -when does overfitting begin? ---- -Variables - k 100..1500 higher better - N 3/4 - learning rate - reg/gamma - epochs -> best on devtest (10..30) (select_weights) - scorer -> approx_bleu correlates ok (stupid bleu, bleu, smooth bleu) - sample from -> kbest | forest - filter -> no uniq (kbest) - pair sampling -> all 5050 108010 PRO alld - update_ok -> update towards correctly ranked - features - 6x tm - 2x lm - wp - Glue - rule ids - rule ngrams - rule shape - span features - - -PRO - k = 1500 - N = 4 - learning rate = 0.0005 - gamma = 0 - epochs = 30 - scorer = stupid bleu (Bleu+1) - sample from = kbest - filter = no - pair sampling = PRO - update_ok - features = base - -cur: - shard_sz 500 1k 3k - PRO with forest sampling - PRO w/o update_ok - tune learning rate - all with discard (not only top 50) - filter kbest uniq? - - -> repeat most on Tset, lXlX stuff - -> PRO approx bleu - -> tune gamma - -> best pair sampling method - -> reduce k? - => scorer => approx_bleu (test w PRO) - -> PRO on training set - -> PRO more features - -> discard + 108010 - - - --- -forest vs kbest count vocab? -108010 select discard -approx bleu - - +Copyright (c) 2012 by Patrick Simianer +See the file ../LICENSE.txt for the licensing terms that this software is +released under. ---- -re-iterate ruleids -r_ -10s -p30 -stopwords -gillespie wtf diff --git a/dtrain/dtrain.cc b/dtrain/dtrain.cc index 3111ce5d..fb6c6880 100644 --- a/dtrain/dtrain.cc +++ b/dtrain/dtrain.cc @@ -376,15 +376,16 @@ main(int argc, char** argv) vector* samples = observer->GetSamples(); if (verbose) { - cerr << "--- ref for " << ii << " "; + cerr << "--- ref for " << ii << ": "; if (t > 0) printWordIDVec(ref_ids_buf[ii]); else printWordIDVec(ref_ids); + cerr << endl; for (unsigned u = 0; u < samples->size(); u++) { cerr << _p5 << _np << "[" << u << ". 
'"; printWordIDVec((*samples)[u].w); cerr << "'" << endl; - cerr << "SCORE=" << (*samples)[0].score << ",model="<< (*samples)[0].model << endl; - cerr << "F{" << (*samples)[0].f << "} ]" << endl << endl; + cerr << "SCORE=" << (*samples)[u].score << ",model="<< (*samples)[u].model << endl; + cerr << "F{" << (*samples)[u].f << "} ]" << endl << endl; } } @@ -434,11 +435,7 @@ main(int argc, char** argv) } } - //////// - // TEST THIS - // reset cumulative_penalties after 1 iter? - // do this only once per INPUT (not per pair) -if (false) { + // l1 regularization if (l1naive) { for (unsigned d = 0; d < lambdas.size(); d++) { weight_t v = lambdas.get(d); @@ -471,9 +468,8 @@ if (false) { } } } + } -} - //////// if (rescale) lambdas /= lambdas.l2norm(); @@ -523,7 +519,7 @@ if (false) { if (!quiet || hstreaming) nonz = (unsigned)lambdas.size_nonzero(); if (!quiet) { - cerr << _p5 << _p << "WEIGHTS" << endl; + cerr << _p9 << _p << "WEIGHTS" << endl; for (vector::iterator it = print_weights.begin(); it != print_weights.end(); it++) { cerr << setw(18) << *it << " = " << lambdas.get(FD::Convert(*it)) << endl; } diff --git a/dtrain/dtrain.h b/dtrain/dtrain.h index 783aa179..59ceb6f6 100644 --- a/dtrain/dtrain.h +++ b/dtrain/dtrain.h @@ -13,7 +13,7 @@ #include "filelib.h" -#define DTRAIN_LOCAL +//#define DTRAIN_LOCAL #define DTRAIN_DOTS 10 // when to display a '.' #define DTRAIN_GRAMMAR_DELIM "########EOS########" diff --git a/dtrain/hstreaming/avg.rb b/dtrain/hstreaming/avg.rb index e0899144..91d4e29a 100755 --- a/dtrain/hstreaming/avg.rb +++ b/dtrain/hstreaming/avg.rb @@ -1,4 +1,4 @@ -# avg.rb +#!/usr/bin/env ruby shard_count_key = "__SHARD_COUNT__" diff --git a/dtrain/hstreaming/cdec.ini b/dtrain/hstreaming/cdec.ini index ce1e1ae2..61f13e86 100644 --- a/dtrain/hstreaming/cdec.ini +++ b/dtrain/hstreaming/cdec.ini @@ -4,7 +4,7 @@ scfg_max_span_limit=15 intersection_strategy=cube_pruning cubepruning_pop_limit=200 feature_function=WordPenalty -feature_function=KLanguageModel test/example/nc-wmt11.en.srilm.gz +feature_function=KLanguageModel nc-wmt11.en.srilm.gz #feature_function=ArityPenalty #feature_function=CMR2008ReorderingFeatures #feature_function=InputIndicator diff --git a/dtrain/hstreaming/hadoop-streaming-job.sh b/dtrain/hstreaming/hadoop-streaming-job.sh index 4c0238f3..90c2b790 100755 --- a/dtrain/hstreaming/hadoop-streaming-job.sh +++ b/dtrain/hstreaming/hadoop-streaming-job.sh @@ -1,26 +1,31 @@ -#!/bin/bash +#!/bin/sh -EXP=test +EXP=a_simple_test +# change these vars to fit your hadoop installation HADOOP_HOME=/usr/lib/hadoop-0.20 JAR=contrib/streaming/hadoop-streaming-0.20.2-cdh3u1.jar HSTREAMING="$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/$JAR" +# ^^^ - IN=nc-v6.de-en.cs.giza.loo/nc-v6.de-en.cs.giza.loo-dtrain1.sz2 -OUT=out/$EXP-weights + IN=input_on_hdfs +OUT=output_weights_on_hdfs +# you can remove the -reducer line if you want to +# do feature selection/averaging locally (e.g. 
to +# keep weights of the iterations) $HSTREAMING \ -mapper "dtrain.sh" \ - -reducer "red-avg.rb" \ + -reducer "lplp.rb l2 select_k 100000" \ -input $IN \ -output $OUT \ -file dtrain.sh \ - -file red-avg.rb \ - -file ~/exp/cdec-dtrain-ro/dtrain/dtrain \ + -file lplp.rb \ + -file ../dtrain \ -file dtrain.ini \ -file cdec.ini \ - -file ~/exp/data/nc-v6.en.3.unk.probing.kenv5 \ - -jobconf mapred.reduce.tasks=1 \ + -file ../test/example/nc-wmt11.en.srilm.gz \ + -jobconf mapred.reduce.tasks=30 \ -jobconf mapred.max.map.failures.percent=0 \ -jobconf mapred.job.name="dtrain $EXP" diff --git a/dtrain/hstreaming/rule_count/red.rb b/dtrain/hstreaming/rule_count/red.rb index 8f9109cc..874ae7ac 100644 --- a/dtrain/hstreaming/rule_count/red.rb +++ b/dtrain/hstreaming/rule_count/red.rb @@ -1,3 +1,5 @@ +#!/usr/bin/env ruby + STDIN.set_encoding 'utf-8' STDOUT.set_encoding 'utf-8' diff --git a/dtrain/hstreaming/rule_count/rulecount.rb b/dtrain/hstreaming/rule_count/rulecount.rb index 035bdf06..67361fa4 100644 --- a/dtrain/hstreaming/rule_count/rulecount.rb +++ b/dtrain/hstreaming/rule_count/rulecount.rb @@ -1,3 +1,5 @@ +#!/usr/bin/env ruby + STDIN.set_encoding 'utf-8' STDOUT.set_encoding 'utf-8' diff --git a/dtrain/pairsampling.h b/dtrain/pairsampling.h index e866c8a0..1fc5b8a0 100644 --- a/dtrain/pairsampling.h +++ b/dtrain/pairsampling.h @@ -32,7 +32,7 @@ all_pairs(vector* s, vector >& training, sc * multipartite ranking * sort by bleu * compare top 10% to middle 80% and low 10% - * 80% to low 10% + * cmp middle 80% to low 10% */ bool _108010_cmp_hyp_by_score(ScoredHyp a, ScoredHyp b) diff --git a/dtrain/score.cc b/dtrain/score.cc index f5e920a0..4cde638a 100644 --- a/dtrain/score.cc +++ b/dtrain/score.cc @@ -11,7 +11,7 @@ namespace dtrain * of Machine Translation" * (Papineni et al. '02) * - * NOTE: 0 if one n in {1..N} has 0 count + * NOTE: 0 if for one n \in {1..N} count is 0 */ score_t BleuScorer::Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref_len) @@ -96,6 +96,8 @@ SmoothBleuScorer::Score(vector& hyp, vector& ref, * as in "Online Large-Margin Training of Syntactic * and Structural Translation Features" * (Chiang et al. 
'08) + * + * NOTE: needs some code in dtrain.cc */ score_t ApproxBleuScorer::Score(vector& hyp, vector& ref, diff --git a/dtrain/test/example/cdec.ini b/dtrain/test/example/cdec.ini index ad958ca6..fe5ca759 100644 --- a/dtrain/test/example/cdec.ini +++ b/dtrain/test/example/cdec.ini @@ -5,6 +5,7 @@ intersection_strategy=cube_pruning cubepruning_pop_limit=30 feature_function=WordPenalty feature_function=KLanguageModel test/example/nc-wmt11.en.srilm.gz +# all currently working feature function for translation: #feature_function=ArityPenalty #feature_function=CMR2008ReorderingFeatures #feature_function=Dwarf @@ -14,9 +15,10 @@ feature_function=KLanguageModel test/example/nc-wmt11.en.srilm.gz #feature_function=NgramFeatures #feature_function=NonLatinCount #feature_function=OutputIndicator -#feature_function=RuleIdentityFeatures -#feature_function=RuleNgramFeatures -#feature_function=RuleShape +feature_function=RuleIdentityFeatures +feature_function=RuleNgramFeatures +feature_function=RuleShape #feature_function=SourceSpanSizeFeatures #feature_function=SourceWordPenalty #feature_function=SpanFeatures +# ^^^ features active that were used in the ACL paper diff --git a/dtrain/test/example/dtrain.ini b/dtrain/test/example/dtrain.ini index ed1b7e5f..68173e11 100644 --- a/dtrain/test/example/dtrain.ini +++ b/dtrain/test/example/dtrain.ini @@ -1,20 +1,20 @@ input=test/example/nc-wmt11.1k.gz # use '-' for stdin -output=w.gz # a weights file -decoder_config=test/example/cdec.ini # a ini for cdec +output=- # a weights file or stdout +decoder_config=test/example/cdec.ini # ini for cdec # these will be printed on each iteration print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough tmp=/tmp -stop_after=20 +stop_after=10 # stop iteration after 10 inputs # interesting stuff -epochs=1 -k=100 -N=4 -learning_rate=0.0001 -gamma=0.00001 -scorer=stupid_bleu -sample_from=kbest -filter=uniq -pair_sampling=108010 -pair_threshold=0.01 -select_weights=last +epochs=3 # run over input 3 times +k=200 # use 100best lists +N=4 # optimize (approx) BLEU4 +learning_rate=0.0001 # learning rate +gamma=0.00001 # use SVM reg +scorer=stupid_bleu # use stupid BLEU+1 approx. +sample_from=kbest # use kbest lists (as opposed to forest) +filter=uniq # only uniq entries in kbest +pair_sampling=108010 # 10 vs 80 vs 10 and 80 vs 10 +pair_threshold=0 # minimum distance in BLEU +select_weights=last # just output last weights diff --git a/dtrain/test/logreg_cd/bin_class.cc b/dtrain/test/logreg_cd/bin_class.cc deleted file mode 100644 index 19bcde25..00000000 --- a/dtrain/test/logreg_cd/bin_class.cc +++ /dev/null @@ -1,4 +0,0 @@ -#include "bin_class.h" - -Objective::~Objective() {} - diff --git a/dtrain/test/logreg_cd/bin_class.h b/dtrain/test/logreg_cd/bin_class.h deleted file mode 100644 index 3466109a..00000000 --- a/dtrain/test/logreg_cd/bin_class.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _BIN_CLASS_H_ -#define _BIN_CLASS_H_ - -#include -#include "sparse_vector.h" - -struct TrainingInstance { - // TODO add other info? loss for MIRA-type updates? 
- SparseVector x_feature_map; - bool y; -}; - -struct Objective { - virtual ~Objective(); - - // returns f(x) and f'(x) - virtual double ObjectiveAndGradient(const SparseVector& x, - const std::vector& training_instances, - SparseVector* g) const = 0; -}; - -#endif diff --git a/dtrain/test/logreg_cd/log_reg.cc b/dtrain/test/logreg_cd/log_reg.cc deleted file mode 100644 index ec2331fe..00000000 --- a/dtrain/test/logreg_cd/log_reg.cc +++ /dev/null @@ -1,39 +0,0 @@ -#include "log_reg.h" - -#include -#include - -#include "sparse_vector.h" - -using namespace std; - -double LogisticRegression::ObjectiveAndGradient(const SparseVector& x, - const vector& training_instances, - SparseVector* g) const { - double cll = 0; - for (int i = 0; i < training_instances.size(); ++i) { - const double dotprod = training_instances[i].x_feature_map.dot(x); // TODO no bias, if bias, add x[0] - double lp_false = dotprod; - double lp_true = -dotprod; - if (0 < lp_true) { - lp_true += log1p(exp(-lp_true)); - lp_false = log1p(exp(lp_false)); - } else { - lp_true = log1p(exp(lp_true)); - lp_false += log1p(exp(-lp_false)); - } - lp_true *= -1; - lp_false *= -1; - if (training_instances[i].y) { // true label - cll -= lp_true; - (*g) -= training_instances[i].x_feature_map * exp(lp_false); - // (*g)[0] -= exp(lp_false); // bias - } else { // false label - cll -= lp_false; - (*g) += training_instances[i].x_feature_map * exp(lp_true); - // g += corpus[i].second * exp(lp_true); - } - } - return cll; -} - diff --git a/dtrain/test/logreg_cd/log_reg.h b/dtrain/test/logreg_cd/log_reg.h deleted file mode 100644 index ecc560b8..00000000 --- a/dtrain/test/logreg_cd/log_reg.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _LOG_REG_H_ -#define _LOG_REG_H_ - -#include -#include "sparse_vector.h" -#include "bin_class.h" - -struct LogisticRegression : public Objective { - double ObjectiveAndGradient(const SparseVector& x, - const std::vector& training_instances, - SparseVector* g) const; -}; - -#endif diff --git a/dtrain/test/mira_update/Hildreth.cpp b/dtrain/test/mira_update/Hildreth.cpp deleted file mode 100644 index 0e67eb15..00000000 --- a/dtrain/test/mira_update/Hildreth.cpp +++ /dev/null @@ -1,187 +0,0 @@ -#include "Hildreth.h" -#include "sparse_vector.h" - -using namespace std; - -namespace Mira { - vector Hildreth::optimise (vector< SparseVector >& a, vector& b) { - - size_t i; - int max_iter = 10000; - double eps = 0.00000001; - double zero = 0.000000000001; - - vector alpha ( b.size() ); - vector F ( b.size() ); - vector kkt ( b.size() ); - - double max_kkt = -1e100; - - size_t K = b.size(); - - double A[K][K]; - bool is_computed[K]; - for ( i = 0; i < K; i++ ) - { - A[i][i] = a[i].dot(a[i]); - is_computed[i] = false; - } - - int max_kkt_i = -1; - - - for ( i = 0; i < b.size(); i++ ) - { - F[i] = b[i]; - kkt[i] = F[i]; - if ( kkt[i] > max_kkt ) - { - max_kkt = kkt[i]; - max_kkt_i = i; - } - } - - int iter = 0; - double diff_alpha; - double try_alpha; - double add_alpha; - - while ( max_kkt >= eps && iter < max_iter ) - { - - diff_alpha = A[max_kkt_i][max_kkt_i] <= zero ? 
0.0 : F[max_kkt_i]/A[max_kkt_i][max_kkt_i]; - try_alpha = alpha[max_kkt_i] + diff_alpha; - add_alpha = 0.0; - - if ( try_alpha < 0.0 ) - add_alpha = -1.0 * alpha[max_kkt_i]; - else - add_alpha = diff_alpha; - - alpha[max_kkt_i] = alpha[max_kkt_i] + add_alpha; - - if ( !is_computed[max_kkt_i] ) - { - for ( i = 0; i < K; i++ ) - { - A[i][max_kkt_i] = a[i].dot(a[max_kkt_i] ); // for version 1 - //A[i][max_kkt_i] = 0; // for version 1 - is_computed[max_kkt_i] = true; - } - } - - for ( i = 0; i < F.size(); i++ ) - { - F[i] -= add_alpha * A[i][max_kkt_i]; - kkt[i] = F[i]; - if ( alpha[i] > zero ) - kkt[i] = abs ( F[i] ); - } - max_kkt = -1e100; - max_kkt_i = -1; - for ( i = 0; i < F.size(); i++ ) - if ( kkt[i] > max_kkt ) - { - max_kkt = kkt[i]; - max_kkt_i = i; - } - - iter++; - } - - return alpha; - } - - vector Hildreth::optimise (vector< SparseVector >& a, vector& b, double C) { - - size_t i; - int max_iter = 10000; - double eps = 0.00000001; - double zero = 0.000000000001; - - vector alpha ( b.size() ); - vector F ( b.size() ); - vector kkt ( b.size() ); - - double max_kkt = -1e100; - - size_t K = b.size(); - - double A[K][K]; - bool is_computed[K]; - for ( i = 0; i < K; i++ ) - { - A[i][i] = a[i].dot(a[i]); - is_computed[i] = false; - } - - int max_kkt_i = -1; - - - for ( i = 0; i < b.size(); i++ ) - { - F[i] = b[i]; - kkt[i] = F[i]; - if ( kkt[i] > max_kkt ) - { - max_kkt = kkt[i]; - max_kkt_i = i; - } - } - - int iter = 0; - double diff_alpha; - double try_alpha; - double add_alpha; - - while ( max_kkt >= eps && iter < max_iter ) - { - - diff_alpha = A[max_kkt_i][max_kkt_i] <= zero ? 0.0 : F[max_kkt_i]/A[max_kkt_i][max_kkt_i]; - try_alpha = alpha[max_kkt_i] + diff_alpha; - add_alpha = 0.0; - - if ( try_alpha < 0.0 ) - add_alpha = -1.0 * alpha[max_kkt_i]; - else if (try_alpha > C) - add_alpha = C - alpha[max_kkt_i]; - else - add_alpha = diff_alpha; - - alpha[max_kkt_i] = alpha[max_kkt_i] + add_alpha; - - if ( !is_computed[max_kkt_i] ) - { - for ( i = 0; i < K; i++ ) - { - A[i][max_kkt_i] = a[i].dot(a[max_kkt_i] ); // for version 1 - //A[i][max_kkt_i] = 0; // for version 1 - is_computed[max_kkt_i] = true; - } - } - - for ( i = 0; i < F.size(); i++ ) - { - F[i] -= add_alpha * A[i][max_kkt_i]; - kkt[i] = F[i]; - if (alpha[i] > C - zero) - kkt[i]=-kkt[i]; - else if (alpha[i] > zero) - kkt[i] = abs(F[i]); - - } - max_kkt = -1e100; - max_kkt_i = -1; - for ( i = 0; i < F.size(); i++ ) - if ( kkt[i] > max_kkt ) - { - max_kkt = kkt[i]; - max_kkt_i = i; - } - - iter++; - } - - return alpha; - } -} diff --git a/dtrain/test/mira_update/Hildreth.h b/dtrain/test/mira_update/Hildreth.h deleted file mode 100644 index 8d791085..00000000 --- a/dtrain/test/mira_update/Hildreth.h +++ /dev/null @@ -1,10 +0,0 @@ -#include "sparse_vector.h" - -namespace Mira { - class Hildreth { - public : - static std::vector optimise(std::vector< SparseVector >& a, std::vector& b); - static std::vector optimise(std::vector< SparseVector >& a, std::vector& b, double C); - }; -} - diff --git a/dtrain/test/mira_update/dtrain.cc b/dtrain/test/mira_update/dtrain.cc deleted file mode 100644 index 933417a4..00000000 --- a/dtrain/test/mira_update/dtrain.cc +++ /dev/null @@ -1,532 +0,0 @@ -#include "common.h" -#include "kbestget.h" -#include "util.h" -#include "sample.h" -#include "Hildreth.h" - -#include "ksampler.h" - -// boost compression -#include -#include -#include -//#include -//#include -using namespace boost::iostreams; - - -#ifdef DTRAIN_DEBUG -#include "tests.h" -#endif - - -/* - * init - * - */ -bool -init(int argc, char** 
argv, po::variables_map* cfg) -{ - po::options_description conff( "Configuration File Options" ); - size_t k, N, T, stop, n_pairs; - string s, f, update_type; - conff.add_options() - ( "decoder_config", po::value(), "configuration file for cdec" ) - ( "kbest", po::value(&k)->default_value(DTRAIN_DEFAULT_K), "k for kbest" ) - ( "ngrams", po::value(&N)->default_value(DTRAIN_DEFAULT_N), "N for Ngrams" ) - ( "filter", po::value(&f)->default_value("unique"), "filter kbest list" ) - ( "epochs", po::value(&T)->default_value(DTRAIN_DEFAULT_T), "# of iterations T" ) - ( "input", po::value(), "input file" ) - ( "scorer", po::value(&s)->default_value(DTRAIN_DEFAULT_SCORER), "scoring metric" ) - ( "output", po::value(), "output weights file" ) - ( "stop_after", po::value(&stop)->default_value(0), "stop after X input sentences" ) - ( "weights_file", po::value(), "input weights file (e.g. from previous iteration)" ) - ( "wprint", po::value(), "weights to print on each iteration" ) - ( "noup", po::value()->zero_tokens(), "do not update weights" ); - - po::options_description clo("Command Line Options"); - clo.add_options() - ( "config,c", po::value(), "dtrain config file" ) - ( "quiet,q", po::value()->zero_tokens(), "be quiet" ) - ( "update-type", po::value(&update_type)->default_value("mira"), "perceptron or mira" ) - ( "n-pairs", po::value(&n_pairs)->default_value(10), "number of pairs used to compute update" ) - ( "verbose,v", po::value()->zero_tokens(), "be verbose" ) -#ifndef DTRAIN_DEBUG - ; -#else - ( "test", "run tests and exit"); -#endif - po::options_description config_options, cmdline_options; - - config_options.add(conff); - cmdline_options.add(clo); - cmdline_options.add(conff); - - po::store( parse_command_line(argc, argv, cmdline_options), *cfg ); - if ( cfg->count("config") ) { - ifstream config( (*cfg)["config"].as().c_str() ); - po::store( po::parse_config_file(config, config_options), *cfg ); - } - po::notify(*cfg); - - if ( !cfg->count("decoder_config") || !cfg->count("input") ) { - cerr << cmdline_options << endl; - return false; - } - if ( cfg->count("noup") && cfg->count("decode") ) { - cerr << "You can't use 'noup' and 'decode' at once." << endl; - return false; - } - if ( cfg->count("filter") && (*cfg)["filter"].as() != "unique" - && (*cfg)["filter"].as() != "no" ) { - cerr << "Wrong 'filter' type: '" << (*cfg)["filter"].as() << "'." << endl; - } - #ifdef DTRAIN_DEBUG - if ( !cfg->count("test") ) { - cerr << cmdline_options << endl; - return false; - } - #endif - return true; -} - - -// output formatting -ostream& _nopos( ostream& out ) { return out << resetiosflags( ios::showpos ); } -ostream& _pos( ostream& out ) { return out << setiosflags( ios::showpos ); } -ostream& _prec2( ostream& out ) { return out << setprecision(2); } -ostream& _prec5( ostream& out ) { return out << setprecision(5); } - - - - -/* - * dtrain - * - */ -int -main( int argc, char** argv ) -{ - cout << setprecision( 5 ); - // handle most parameters - po::variables_map cfg; - if ( ! 
init(argc, argv, &cfg) ) exit(1); // something is wrong -#ifdef DTRAIN_DEBUG - if ( cfg.count("test") ) run_tests(); // run tests and exit -#endif - bool quiet = false; - if ( cfg.count("quiet") ) quiet = true; - bool verbose = false; - if ( cfg.count("verbose") ) verbose = true; - bool noup = false; - if ( cfg.count("noup") ) noup = true; - const size_t k = cfg["kbest"].as(); - const size_t N = cfg["ngrams"].as(); - const size_t T = cfg["epochs"].as(); - const size_t stop_after = cfg["stop_after"].as(); - const string filter_type = cfg["filter"].as(); - const string update_type = cfg["update-type"].as(); - const size_t n_pairs = cfg["n-pairs"].as(); - const string output_file = cfg["output"].as(); - if ( !quiet ) { - cout << endl << "dtrain" << endl << "Parameters:" << endl; - cout << setw(25) << "k " << k << endl; - cout << setw(25) << "N " << N << endl; - cout << setw(25) << "T " << T << endl; - if ( cfg.count("stop-after") ) - cout << setw(25) << "stop_after " << stop_after << endl; - if ( cfg.count("weights") ) - cout << setw(25) << "weights " << cfg["weights"].as() << endl; - cout << setw(25) << "input " << "'" << cfg["input"].as() << "'" << endl; - cout << setw(25) << "filter " << "'" << filter_type << "'" << endl; - } - - vector wprint; - if ( cfg.count("wprint") ) { - boost::split( wprint, cfg["wprint"].as(), boost::is_any_of(" ") ); - } - - // setup decoder, observer - register_feature_functions(); - SetSilent(true); - ReadFile ini_rf( cfg["decoder_config"].as() ); - if ( !quiet ) - cout << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as() << "'" << endl; - Decoder decoder( ini_rf.stream() ); - //KBestGetter observer( k, filter_type ); - MT19937 rng; - KSampler observer( k, &rng ); - - // scoring metric/scorer - string scorer_str = cfg["scorer"].as(); - double (*scorer)( NgramCounts&, const size_t, const size_t, size_t, vector ); - if ( scorer_str == "bleu" ) { - scorer = &bleu; - } else if ( scorer_str == "stupid_bleu" ) { - scorer = &stupid_bleu; - } else if ( scorer_str == "smooth_bleu" ) { - scorer = &smooth_bleu; - } else if ( scorer_str == "approx_bleu" ) { - scorer = &approx_bleu; - } else { - cerr << "Don't know scoring metric: '" << scorer_str << "', exiting." << endl; - exit(1); - } - // for approx_bleu - NgramCounts global_counts( N ); // counts for 1 best translations - size_t global_hyp_len = 0; // sum hypothesis lengths - size_t global_ref_len = 0; // sum reference lengths - // this is all BLEU implmentations - vector bleu_weights; // we leave this empty -> 1/N; TODO? - if ( !quiet ) cout << setw(26) << "scorer '" << scorer_str << "'" << endl << endl; - - // init weights - Weights weights; - if ( cfg.count("weights") ) weights.InitFromFile( cfg["weights"].as() ); - SparseVector lambdas; - weights.InitSparseVector( &lambdas ); - vector dense_weights; - - // input - if ( !quiet && !verbose ) - cout << "(a dot represents " << DTRAIN_DOTS << " lines of input)" << endl; - string input_fn = cfg["input"].as(); - ifstream input; - if ( input_fn != "-" ) input.open( input_fn.c_str() ); - string in; - vector in_split; // input: src\tref\tpsg - vector ref_tok; // tokenized reference - vector ref_ids; // reference as vector of WordID - string grammar_str; - - // buffer input for t > 0 - vector src_str_buf; // source strings, TODO? 
memory - vector > ref_ids_buf; // references as WordID vecs - filtering_ostream grammar_buf; // written to compressed file in /tmp - // this is for writing the grammar buffer file - grammar_buf.push( gzip_compressor() ); - char grammar_buf_tmp_fn[] = DTRAIN_TMP_DIR"/dtrain-grammars-XXXXXX"; - mkstemp( grammar_buf_tmp_fn ); - grammar_buf.push( file_sink(grammar_buf_tmp_fn, ios::binary | ios::trunc) ); - - size_t sid = 0, in_sz = 99999999; // sentence id, input size - double acc_1best_score = 0., acc_1best_model = 0.; - vector > scores_per_iter; - double max_score = 0.; - size_t best_t = 0; - bool next = false, stop = false; - double score = 0.; - size_t cand_len = 0; - double overall_time = 0.; - - // for the perceptron/SVM; TODO as params - double eta = 0.0005; - double gamma = 0.;//01; // -> SVM - lambdas.add_value( FD::Convert("__bias"), 0 ); - - // for random sampling - srand ( time(NULL) ); - - - for ( size_t t = 0; t < T; t++ ) // T epochs - { - - time_t start, end; - time( &start ); - - // actually, we need only need this if t > 0 FIXME - ifstream grammar_file( grammar_buf_tmp_fn, ios_base::in | ios_base::binary ); - filtering_istream grammar_buf_in; - grammar_buf_in.push( gzip_decompressor() ); - grammar_buf_in.push( grammar_file ); - - // reset average scores - acc_1best_score = acc_1best_model = 0.; - - // reset sentence counter - sid = 0; - - if ( !quiet ) cout << "Iteration #" << t+1 << " of " << T << "." << endl; - - while( true ) - { - - // get input from stdin or file - in.clear(); - next = stop = false; // next iteration, premature stop - if ( t == 0 ) { - if ( input_fn == "-" ) { - if ( !getline(cin, in) ) next = true; - } else { - if ( !getline(input, in) ) next = true; - } - } else { - if ( sid == in_sz ) next = true; // stop if we reach the end of our input - } - // stop after X sentences (but still iterate for those) - if ( stop_after > 0 && stop_after == sid && !next ) stop = true; - - // produce some pretty output - if ( !quiet && !verbose ) { - if ( sid == 0 ) cout << " "; - if ( (sid+1) % (DTRAIN_DOTS) == 0 ) { - cout << "."; - cout.flush(); - } - if ( (sid+1) % (20*DTRAIN_DOTS) == 0) { - cout << " " << sid+1 << endl; - if ( !next && !stop ) cout << " "; - } - if ( stop ) { - if ( sid % (20*DTRAIN_DOTS) != 0 ) cout << " " << sid << endl; - cout << "Stopping after " << stop_after << " input sentences." 
<< endl; - } else { - if ( next ) { - if ( sid % (20*DTRAIN_DOTS) != 0 ) { - cout << " " << sid << endl; - } - } - } - } - - // next iteration - if ( next || stop ) break; - - // weights - dense_weights.clear(); - weights.InitFromVector( lambdas ); - weights.InitVector( &dense_weights ); - decoder.SetWeights( dense_weights ); - - if ( t == 0 ) { - // handling input - in_split.clear(); - boost::split( in_split, in, boost::is_any_of("\t") ); // in_split[0] is id - // getting reference - ref_tok.clear(); ref_ids.clear(); - boost::split( ref_tok, in_split[2], boost::is_any_of(" ") ); - register_and_convert( ref_tok, ref_ids ); - ref_ids_buf.push_back( ref_ids ); - // process and set grammar - bool broken_grammar = true; - for ( string::iterator ti = in_split[3].begin(); ti != in_split[3].end(); ti++ ) { - if ( !isspace(*ti) ) { - broken_grammar = false; - break; - } - } - if ( broken_grammar ) continue; - grammar_str = boost::replace_all_copy( in_split[3], " __NEXT__RULE__ ", "\n" ) + "\n"; // FIXME copy, __ - grammar_buf << grammar_str << DTRAIN_GRAMMAR_DELIM << endl; - decoder.SetSentenceGrammarFromString( grammar_str ); - // decode, kbest - src_str_buf.push_back( in_split[1] ); - decoder.Decode( in_split[1], &observer ); - } else { - // get buffered grammar - grammar_str.clear(); - int i = 1; - while ( true ) { - string g; - getline( grammar_buf_in, g ); - if ( g == DTRAIN_GRAMMAR_DELIM ) break; - grammar_str += g+"\n"; - i += 1; - } - decoder.SetSentenceGrammarFromString( grammar_str ); - // decode, kbest - decoder.Decode( src_str_buf[sid], &observer ); - } - - // get kbest list - KBestList* kb; - //if ( ) { // TODO get from forest - kb = observer.GetKBest(); - //} - - // scoring kbest - if ( t > 0 ) ref_ids = ref_ids_buf[sid]; - for ( size_t i = 0; i < kb->GetSize(); i++ ) { - NgramCounts counts = make_ngram_counts( ref_ids, kb->sents[i], N ); - // this is for approx bleu - if ( scorer_str == "approx_bleu" ) { - if ( i == 0 ) { // 'context of 1best translations' - global_counts += counts; - global_hyp_len += kb->sents[i].size(); - global_ref_len += ref_ids.size(); - counts.reset(); - cand_len = 0; - } else { - cand_len = kb->sents[i].size(); - } - NgramCounts counts_tmp = global_counts + counts; - // TODO as param - score = 0.9 * scorer( counts_tmp, - global_ref_len, - global_hyp_len + cand_len, N, bleu_weights ); - } else { - // other scorers - cand_len = kb->sents[i].size(); - score = scorer( counts, - ref_ids.size(), - kb->sents[i].size(), N, bleu_weights ); - } - - kb->scores.push_back( score ); - - if ( i == 0 ) { - acc_1best_score += score; - acc_1best_model += kb->model_scores[i]; - } - - if ( verbose ) { - if ( i == 0 ) cout << "'" << TD::GetString( ref_ids ) << "' [ref]" << endl; - cout << _prec5 << _nopos << "[hyp " << i << "] " << "'" << TD::GetString( kb->sents[i] ) << "'"; - cout << " [SCORE=" << score << ",model="<< kb->model_scores[i] << "]" << endl; - cout << kb->feats[i] << endl; // this is maybe too verbose - } - } // Nbest loop - - if ( verbose ) cout << endl; - - - // UPDATE WEIGHTS - if ( !noup ) { - - TrainingInstances pairs; - sample_all( kb, pairs, n_pairs ); - - vector< SparseVector > featureValueDiffs; - vector lossMinusModelScoreDiffs; - for ( TrainingInstances::iterator ti = pairs.begin(); - ti != pairs.end(); ti++ ) { - - SparseVector dv; - if ( ti->first_score - ti->second_score < 0 ) { - dv = ti->second - ti->first; - dv.add_value( FD::Convert("__bias"), -1 ); - - featureValueDiffs.push_back(dv); - double lossMinusModelScoreDiff = ti->loss_diff - 
ti->model_score_diff; - lossMinusModelScoreDiffs.push_back(lossMinusModelScoreDiff); - - if (update_type == "perceptron") { - lambdas += dv * eta; - cerr << "after perceptron update: " << lambdas << endl << endl; - } - - if ( verbose ) { - cout << "{{ f("<< ti->first_rank <<") > f(" << ti->second_rank << ") but g(i)="<< ti->first_score <<" < g(j)="<< ti->second_score << " so update" << endl; - cout << " i " << TD::GetString(kb->sents[ti->first_rank]) << endl; - cout << " " << kb->feats[ti->first_rank] << endl; - cout << " j " << TD::GetString(kb->sents[ti->second_rank]) << endl; - cout << " " << kb->feats[ti->second_rank] << endl; - cout << " diff vec: " << dv << endl; - cout << " lambdas after update: " << lambdas << endl; - cout << "}}" << endl; - } - } else { - //SparseVector reg; - //reg = lambdas * ( 2 * gamma ); - //lambdas += reg * ( -eta ); - } - } - cerr << "Collected " << featureValueDiffs.size() << " constraints." << endl; - - double slack = 0.01; - if (update_type == "mira") { - if (featureValueDiffs.size() > 0) { - vector alphas; - if (slack != 0) { - alphas = Mira::Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiffs, slack); - } else { - alphas = Mira::Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiffs); - } - - for (size_t k = 0; k < featureValueDiffs.size(); ++k) { - lambdas += featureValueDiffs[k] * alphas[k]; - } - // cerr << "after mira update: " << lambdas << endl << endl; - } - } - } - - ++sid; - - } // input loop - - if ( t == 0 ) in_sz = sid; // remember size (lines) of input - - // print some stats - double avg_1best_score = acc_1best_score/(double)in_sz; - double avg_1best_model = acc_1best_model/(double)in_sz; - double avg_1best_score_diff, avg_1best_model_diff; - if ( t > 0 ) { - avg_1best_score_diff = avg_1best_score - scores_per_iter[t-1][0]; - avg_1best_model_diff = avg_1best_model - scores_per_iter[t-1][1]; - } else { - avg_1best_score_diff = avg_1best_score; - avg_1best_model_diff = avg_1best_model; - } - cout << _prec5 << _pos << "WEIGHTS" << endl; - for (vector::iterator it = wprint.begin(); it != wprint.end(); it++) { - cout << setw(16) << *it << " = " << dense_weights[FD::Convert( *it )] << endl; - } - - cout << " ---" << endl; - cout << _nopos << " avg score: " << avg_1best_score; - cout << _pos << " (" << avg_1best_score_diff << ")" << endl; - cout << _nopos << "avg model score: " << avg_1best_model; - cout << _pos << " (" << avg_1best_model_diff << ")" << endl; - vector remember_scores; - remember_scores.push_back( avg_1best_score ); - remember_scores.push_back( avg_1best_model ); - scores_per_iter.push_back( remember_scores ); - if ( avg_1best_score > max_score ) { - max_score = avg_1best_score; - best_t = t; - } - - // close open files - if ( input_fn != "-" ) input.close(); - close( grammar_buf ); - grammar_file.close(); - - time ( &end ); - double time_dif = difftime( end, start ); - overall_time += time_dif; - if ( !quiet ) { - cout << _prec2 << _nopos << "(time " << time_dif/60. << " min, "; - cout << time_dif/(double)in_sz<< " s/S)" << endl; - } - - if ( t+1 != T ) cout << endl; - - if ( noup ) break; - - // write weights after every epoch - std::string s; - std::stringstream out; - out << t; - s = out.str(); - string weights_file = output_file + "." 
+ s; - weights.WriteToFile(weights_file, true ); - - } // outer loop - - unlink( grammar_buf_tmp_fn ); - if ( !noup ) { - if ( !quiet ) cout << endl << "writing weights file '" << cfg["output"].as() << "' ..."; - weights.WriteToFile( cfg["output"].as(), true ); - if ( !quiet ) cout << "done" << endl; - } - - if ( !quiet ) { - cout << _prec5 << _nopos << endl << "---" << endl << "Best iteration: "; - cout << best_t+1 << " [SCORE '" << scorer_str << "'=" << max_score << "]." << endl; - cout << _prec2 << "This took " << overall_time/60. << " min." << endl; - } - - return 0; -} - diff --git a/dtrain/test/mira_update/sample.h b/dtrain/test/mira_update/sample.h deleted file mode 100644 index 5c331bba..00000000 --- a/dtrain/test/mira_update/sample.h +++ /dev/null @@ -1,101 +0,0 @@ -#ifndef _DTRAIN_SAMPLE_H_ -#define _DTRAIN_SAMPLE_H_ - - -#include "kbestget.h" - - -namespace dtrain -{ - - -struct TPair -{ - SparseVector first, second; - size_t first_rank, second_rank; - double first_score, second_score; - double model_score_diff; - double loss_diff; -}; - -typedef vector TrainingInstances; - - -void - sample_all( KBestList* kb, TrainingInstances &training, size_t n_pairs ) -{ - std::vector loss_diffs; - TrainingInstances training_tmp; - for ( size_t i = 0; i < kb->GetSize()-1; i++ ) { - for ( size_t j = i+1; j < kb->GetSize(); j++ ) { - TPair p; - p.first = kb->feats[i]; - p.second = kb->feats[j]; - p.first_rank = i; - p.second_rank = j; - p.first_score = kb->scores[i]; - p.second_score = kb->scores[j]; - - bool conservative = 1; - if ( kb->scores[i] - kb->scores[j] < 0 ) { - // j=hope, i=fear - p.model_score_diff = kb->model_scores[j] - kb->model_scores[i]; - p.loss_diff = kb->scores[j] - kb->scores[i]; - training_tmp.push_back(p); - loss_diffs.push_back(p.loss_diff); - } - else if (!conservative) { - // i=hope, j=fear - p.model_score_diff = kb->model_scores[i] - kb->model_scores[j]; - p.loss_diff = kb->scores[i] - kb->scores[j]; - training_tmp.push_back(p); - loss_diffs.push_back(p.loss_diff); - } - } - } - - if (training_tmp.size() > 0) { - double threshold; - std::sort(loss_diffs.begin(), loss_diffs.end()); - std::reverse(loss_diffs.begin(), loss_diffs.end()); - threshold = loss_diffs.size() >= n_pairs ? loss_diffs[n_pairs-1] : loss_diffs[loss_diffs.size()-1]; - cerr << "threshold: " << threshold << endl; - size_t constraints = 0; - for (size_t i = 0; (i < training_tmp.size() && constraints < n_pairs); ++i) { - if (training_tmp[i].loss_diff >= threshold) { - training.push_back(training_tmp[i]); - constraints++; - } - } - } - else { - cerr << "No pairs selected." 
<< endl; - } -} - -void -sample_rand( KBestList* kb, TrainingInstances &training ) -{ - srand( time(NULL) ); - for ( size_t i = 0; i < kb->GetSize()-1; i++ ) { - for ( size_t j = i+1; j < kb->GetSize(); j++ ) { - if ( rand() % 2 ) { - TPair p; - p.first = kb->feats[i]; - p.second = kb->feats[j]; - p.first_rank = i; - p.second_rank = j; - p.first_score = kb->scores[i]; - p.second_score = kb->scores[j]; - training.push_back( p ); - } - } - } -} - - -} // namespace - - -#endif - diff --git a/dtrain/test/mtm11/logreg_cd/bin_class.cc b/dtrain/test/mtm11/logreg_cd/bin_class.cc new file mode 100644 index 00000000..19bcde25 --- /dev/null +++ b/dtrain/test/mtm11/logreg_cd/bin_class.cc @@ -0,0 +1,4 @@ +#include "bin_class.h" + +Objective::~Objective() {} + diff --git a/dtrain/test/mtm11/logreg_cd/bin_class.h b/dtrain/test/mtm11/logreg_cd/bin_class.h new file mode 100644 index 00000000..3466109a --- /dev/null +++ b/dtrain/test/mtm11/logreg_cd/bin_class.h @@ -0,0 +1,22 @@ +#ifndef _BIN_CLASS_H_ +#define _BIN_CLASS_H_ + +#include +#include "sparse_vector.h" + +struct TrainingInstance { + // TODO add other info? loss for MIRA-type updates? + SparseVector x_feature_map; + bool y; +}; + +struct Objective { + virtual ~Objective(); + + // returns f(x) and f'(x) + virtual double ObjectiveAndGradient(const SparseVector& x, + const std::vector& training_instances, + SparseVector* g) const = 0; +}; + +#endif diff --git a/dtrain/test/mtm11/logreg_cd/log_reg.cc b/dtrain/test/mtm11/logreg_cd/log_reg.cc new file mode 100644 index 00000000..ec2331fe --- /dev/null +++ b/dtrain/test/mtm11/logreg_cd/log_reg.cc @@ -0,0 +1,39 @@ +#include "log_reg.h" + +#include +#include + +#include "sparse_vector.h" + +using namespace std; + +double LogisticRegression::ObjectiveAndGradient(const SparseVector& x, + const vector& training_instances, + SparseVector* g) const { + double cll = 0; + for (int i = 0; i < training_instances.size(); ++i) { + const double dotprod = training_instances[i].x_feature_map.dot(x); // TODO no bias, if bias, add x[0] + double lp_false = dotprod; + double lp_true = -dotprod; + if (0 < lp_true) { + lp_true += log1p(exp(-lp_true)); + lp_false = log1p(exp(lp_false)); + } else { + lp_true = log1p(exp(lp_true)); + lp_false += log1p(exp(-lp_false)); + } + lp_true *= -1; + lp_false *= -1; + if (training_instances[i].y) { // true label + cll -= lp_true; + (*g) -= training_instances[i].x_feature_map * exp(lp_false); + // (*g)[0] -= exp(lp_false); // bias + } else { // false label + cll -= lp_false; + (*g) += training_instances[i].x_feature_map * exp(lp_true); + // g += corpus[i].second * exp(lp_true); + } + } + return cll; +} + diff --git a/dtrain/test/mtm11/logreg_cd/log_reg.h b/dtrain/test/mtm11/logreg_cd/log_reg.h new file mode 100644 index 00000000..ecc560b8 --- /dev/null +++ b/dtrain/test/mtm11/logreg_cd/log_reg.h @@ -0,0 +1,14 @@ +#ifndef _LOG_REG_H_ +#define _LOG_REG_H_ + +#include +#include "sparse_vector.h" +#include "bin_class.h" + +struct LogisticRegression : public Objective { + double ObjectiveAndGradient(const SparseVector& x, + const std::vector& training_instances, + SparseVector* g) const; +}; + +#endif diff --git a/dtrain/test/mtm11/mira_update/Hildreth.cpp b/dtrain/test/mtm11/mira_update/Hildreth.cpp new file mode 100644 index 00000000..0e67eb15 --- /dev/null +++ b/dtrain/test/mtm11/mira_update/Hildreth.cpp @@ -0,0 +1,187 @@ +#include "Hildreth.h" +#include "sparse_vector.h" + +using namespace std; + +namespace Mira { + vector Hildreth::optimise (vector< SparseVector >& a, vector& b) { + + 
size_t i; + int max_iter = 10000; + double eps = 0.00000001; + double zero = 0.000000000001; + + vector alpha ( b.size() ); + vector F ( b.size() ); + vector kkt ( b.size() ); + + double max_kkt = -1e100; + + size_t K = b.size(); + + double A[K][K]; + bool is_computed[K]; + for ( i = 0; i < K; i++ ) + { + A[i][i] = a[i].dot(a[i]); + is_computed[i] = false; + } + + int max_kkt_i = -1; + + + for ( i = 0; i < b.size(); i++ ) + { + F[i] = b[i]; + kkt[i] = F[i]; + if ( kkt[i] > max_kkt ) + { + max_kkt = kkt[i]; + max_kkt_i = i; + } + } + + int iter = 0; + double diff_alpha; + double try_alpha; + double add_alpha; + + while ( max_kkt >= eps && iter < max_iter ) + { + + diff_alpha = A[max_kkt_i][max_kkt_i] <= zero ? 0.0 : F[max_kkt_i]/A[max_kkt_i][max_kkt_i]; + try_alpha = alpha[max_kkt_i] + diff_alpha; + add_alpha = 0.0; + + if ( try_alpha < 0.0 ) + add_alpha = -1.0 * alpha[max_kkt_i]; + else + add_alpha = diff_alpha; + + alpha[max_kkt_i] = alpha[max_kkt_i] + add_alpha; + + if ( !is_computed[max_kkt_i] ) + { + for ( i = 0; i < K; i++ ) + { + A[i][max_kkt_i] = a[i].dot(a[max_kkt_i] ); // for version 1 + //A[i][max_kkt_i] = 0; // for version 1 + is_computed[max_kkt_i] = true; + } + } + + for ( i = 0; i < F.size(); i++ ) + { + F[i] -= add_alpha * A[i][max_kkt_i]; + kkt[i] = F[i]; + if ( alpha[i] > zero ) + kkt[i] = abs ( F[i] ); + } + max_kkt = -1e100; + max_kkt_i = -1; + for ( i = 0; i < F.size(); i++ ) + if ( kkt[i] > max_kkt ) + { + max_kkt = kkt[i]; + max_kkt_i = i; + } + + iter++; + } + + return alpha; + } + + vector Hildreth::optimise (vector< SparseVector >& a, vector& b, double C) { + + size_t i; + int max_iter = 10000; + double eps = 0.00000001; + double zero = 0.000000000001; + + vector alpha ( b.size() ); + vector F ( b.size() ); + vector kkt ( b.size() ); + + double max_kkt = -1e100; + + size_t K = b.size(); + + double A[K][K]; + bool is_computed[K]; + for ( i = 0; i < K; i++ ) + { + A[i][i] = a[i].dot(a[i]); + is_computed[i] = false; + } + + int max_kkt_i = -1; + + + for ( i = 0; i < b.size(); i++ ) + { + F[i] = b[i]; + kkt[i] = F[i]; + if ( kkt[i] > max_kkt ) + { + max_kkt = kkt[i]; + max_kkt_i = i; + } + } + + int iter = 0; + double diff_alpha; + double try_alpha; + double add_alpha; + + while ( max_kkt >= eps && iter < max_iter ) + { + + diff_alpha = A[max_kkt_i][max_kkt_i] <= zero ? 
0.0 : F[max_kkt_i]/A[max_kkt_i][max_kkt_i]; + try_alpha = alpha[max_kkt_i] + diff_alpha; + add_alpha = 0.0; + + if ( try_alpha < 0.0 ) + add_alpha = -1.0 * alpha[max_kkt_i]; + else if (try_alpha > C) + add_alpha = C - alpha[max_kkt_i]; + else + add_alpha = diff_alpha; + + alpha[max_kkt_i] = alpha[max_kkt_i] + add_alpha; + + if ( !is_computed[max_kkt_i] ) + { + for ( i = 0; i < K; i++ ) + { + A[i][max_kkt_i] = a[i].dot(a[max_kkt_i] ); // for version 1 + //A[i][max_kkt_i] = 0; // for version 1 + is_computed[max_kkt_i] = true; + } + } + + for ( i = 0; i < F.size(); i++ ) + { + F[i] -= add_alpha * A[i][max_kkt_i]; + kkt[i] = F[i]; + if (alpha[i] > C - zero) + kkt[i]=-kkt[i]; + else if (alpha[i] > zero) + kkt[i] = abs(F[i]); + + } + max_kkt = -1e100; + max_kkt_i = -1; + for ( i = 0; i < F.size(); i++ ) + if ( kkt[i] > max_kkt ) + { + max_kkt = kkt[i]; + max_kkt_i = i; + } + + iter++; + } + + return alpha; + } +} diff --git a/dtrain/test/mtm11/mira_update/Hildreth.h b/dtrain/test/mtm11/mira_update/Hildreth.h new file mode 100644 index 00000000..8d791085 --- /dev/null +++ b/dtrain/test/mtm11/mira_update/Hildreth.h @@ -0,0 +1,10 @@ +#include "sparse_vector.h" + +namespace Mira { + class Hildreth { + public : + static std::vector optimise(std::vector< SparseVector >& a, std::vector& b); + static std::vector optimise(std::vector< SparseVector >& a, std::vector& b, double C); + }; +} + diff --git a/dtrain/test/mtm11/mira_update/dtrain.cc b/dtrain/test/mtm11/mira_update/dtrain.cc new file mode 100644 index 00000000..933417a4 --- /dev/null +++ b/dtrain/test/mtm11/mira_update/dtrain.cc @@ -0,0 +1,532 @@ +#include "common.h" +#include "kbestget.h" +#include "util.h" +#include "sample.h" +#include "Hildreth.h" + +#include "ksampler.h" + +// boost compression +#include +#include +#include +//#include +//#include +using namespace boost::iostreams; + + +#ifdef DTRAIN_DEBUG +#include "tests.h" +#endif + + +/* + * init + * + */ +bool +init(int argc, char** argv, po::variables_map* cfg) +{ + po::options_description conff( "Configuration File Options" ); + size_t k, N, T, stop, n_pairs; + string s, f, update_type; + conff.add_options() + ( "decoder_config", po::value(), "configuration file for cdec" ) + ( "kbest", po::value(&k)->default_value(DTRAIN_DEFAULT_K), "k for kbest" ) + ( "ngrams", po::value(&N)->default_value(DTRAIN_DEFAULT_N), "N for Ngrams" ) + ( "filter", po::value(&f)->default_value("unique"), "filter kbest list" ) + ( "epochs", po::value(&T)->default_value(DTRAIN_DEFAULT_T), "# of iterations T" ) + ( "input", po::value(), "input file" ) + ( "scorer", po::value(&s)->default_value(DTRAIN_DEFAULT_SCORER), "scoring metric" ) + ( "output", po::value(), "output weights file" ) + ( "stop_after", po::value(&stop)->default_value(0), "stop after X input sentences" ) + ( "weights_file", po::value(), "input weights file (e.g. 
from previous iteration)" ) + ( "wprint", po::value(), "weights to print on each iteration" ) + ( "noup", po::value()->zero_tokens(), "do not update weights" ); + + po::options_description clo("Command Line Options"); + clo.add_options() + ( "config,c", po::value(), "dtrain config file" ) + ( "quiet,q", po::value()->zero_tokens(), "be quiet" ) + ( "update-type", po::value(&update_type)->default_value("mira"), "perceptron or mira" ) + ( "n-pairs", po::value(&n_pairs)->default_value(10), "number of pairs used to compute update" ) + ( "verbose,v", po::value()->zero_tokens(), "be verbose" ) +#ifndef DTRAIN_DEBUG + ; +#else + ( "test", "run tests and exit"); +#endif + po::options_description config_options, cmdline_options; + + config_options.add(conff); + cmdline_options.add(clo); + cmdline_options.add(conff); + + po::store( parse_command_line(argc, argv, cmdline_options), *cfg ); + if ( cfg->count("config") ) { + ifstream config( (*cfg)["config"].as().c_str() ); + po::store( po::parse_config_file(config, config_options), *cfg ); + } + po::notify(*cfg); + + if ( !cfg->count("decoder_config") || !cfg->count("input") ) { + cerr << cmdline_options << endl; + return false; + } + if ( cfg->count("noup") && cfg->count("decode") ) { + cerr << "You can't use 'noup' and 'decode' at once." << endl; + return false; + } + if ( cfg->count("filter") && (*cfg)["filter"].as() != "unique" + && (*cfg)["filter"].as() != "no" ) { + cerr << "Wrong 'filter' type: '" << (*cfg)["filter"].as() << "'." << endl; + } + #ifdef DTRAIN_DEBUG + if ( !cfg->count("test") ) { + cerr << cmdline_options << endl; + return false; + } + #endif + return true; +} + + +// output formatting +ostream& _nopos( ostream& out ) { return out << resetiosflags( ios::showpos ); } +ostream& _pos( ostream& out ) { return out << setiosflags( ios::showpos ); } +ostream& _prec2( ostream& out ) { return out << setprecision(2); } +ostream& _prec5( ostream& out ) { return out << setprecision(5); } + + + + +/* + * dtrain + * + */ +int +main( int argc, char** argv ) +{ + cout << setprecision( 5 ); + // handle most parameters + po::variables_map cfg; + if ( ! 
+
+
+// output formatting
+ostream& _nopos( ostream& out ) { return out << resetiosflags( ios::showpos ); }
+ostream& _pos( ostream& out ) { return out << setiosflags( ios::showpos ); }
+ostream& _prec2( ostream& out ) { return out << setprecision(2); }
+ostream& _prec5( ostream& out ) { return out << setprecision(5); }
+
+
+/*
+ * dtrain
+ *
+ */
+int
+main( int argc, char** argv )
+{
+  cout << setprecision( 5 );
+  // handle most parameters
+  po::variables_map cfg;
+  if ( !init(argc, argv, &cfg) ) exit(1); // something is wrong
+#ifdef DTRAIN_DEBUG
+  if ( cfg.count("test") ) run_tests(); // run tests and exit
+#endif
+  bool quiet = false;
+  if ( cfg.count("quiet") ) quiet = true;
+  bool verbose = false;
+  if ( cfg.count("verbose") ) verbose = true;
+  bool noup = false;
+  if ( cfg.count("noup") ) noup = true;
+  const size_t k = cfg["kbest"].as<size_t>();
+  const size_t N = cfg["ngrams"].as<size_t>();
+  const size_t T = cfg["epochs"].as<size_t>();
+  const size_t stop_after = cfg["stop_after"].as<size_t>();
+  const string filter_type = cfg["filter"].as<string>();
+  const string update_type = cfg["update-type"].as<string>();
+  const size_t n_pairs = cfg["n-pairs"].as<size_t>();
+  const string output_file = cfg["output"].as<string>();
+  if ( !quiet ) {
+    cout << endl << "dtrain" << endl << "Parameters:" << endl;
+    cout << setw(25) << "k " << k << endl;
+    cout << setw(25) << "N " << N << endl;
+    cout << setw(25) << "T " << T << endl;
+    if ( cfg.count("stop_after") )
+      cout << setw(25) << "stop_after " << stop_after << endl;
+    if ( cfg.count("weights_file") )
+      cout << setw(25) << "weights " << cfg["weights_file"].as<string>() << endl;
+    cout << setw(25) << "input " << "'" << cfg["input"].as<string>() << "'" << endl;
+    cout << setw(25) << "filter " << "'" << filter_type << "'" << endl;
+  }
+
+  vector<string> wprint;
+  if ( cfg.count("wprint") ) {
+    boost::split( wprint, cfg["wprint"].as<string>(), boost::is_any_of(" ") );
+  }
+
+  // setup decoder, observer
+  register_feature_functions();
+  SetSilent(true);
+  ReadFile ini_rf( cfg["decoder_config"].as<string>() );
+  if ( !quiet )
+    cout << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as<string>() << "'" << endl;
+  Decoder decoder( ini_rf.stream() );
+  //KBestGetter observer( k, filter_type );
+  MT19937 rng;
+  KSampler observer( k, &rng );
+
+  // scoring metric/scorer
+  string scorer_str = cfg["scorer"].as<string>();
+  double (*scorer)( NgramCounts&, const size_t, const size_t, size_t, vector<float> );
+  if ( scorer_str == "bleu" ) {
+    scorer = &bleu;
+  } else if ( scorer_str == "stupid_bleu" ) {
+    scorer = &stupid_bleu;
+  } else if ( scorer_str == "smooth_bleu" ) {
+    scorer = &smooth_bleu;
+  } else if ( scorer_str == "approx_bleu" ) {
+    scorer = &approx_bleu;
+  } else {
+    cerr << "Don't know scoring metric: '" << scorer_str << "', exiting." << endl;
+    exit(1);
+  }
+  // for approx_bleu
+  NgramCounts global_counts( N ); // counts for 1-best translations
+  size_t global_hyp_len = 0;      // sum of hypothesis lengths
+  size_t global_ref_len = 0;      // sum of reference lengths
+  // this is for all BLEU implementations
+  vector<float> bleu_weights; // we leave this empty -> 1/N; TODO?
+  if ( !quiet ) cout << setw(26) << "scorer '" << scorer_str << "'" << endl << endl;
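+  // For reference, all of the selectable BLEU variants build on the standard
+  // definition (a sketch in usual notation, not code):
+  //   BLEU = BP * exp( sum_{n=1..N} w_n * log p_n ),  BP = min(1, e^(1 - r/h))
+  // with p_n the modified n-gram precisions, r/h the reference/hypothesis
+  // lengths, and uniform w_n = 1/N when bleu_weights stays empty.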
+
+  // init weights
+  Weights weights;
+  if ( cfg.count("weights_file") ) weights.InitFromFile( cfg["weights_file"].as<string>() );
+  SparseVector<double> lambdas;
+  weights.InitSparseVector( &lambdas );
+  vector<double> dense_weights;
+
+  // input
+  if ( !quiet && !verbose )
+    cout << "(a dot represents " << DTRAIN_DOTS << " lines of input)" << endl;
+  string input_fn = cfg["input"].as<string>();
+  ifstream input;
+  if ( input_fn != "-" ) input.open( input_fn.c_str() );
+  string in;
+  vector<string> in_split; // input: id\tsrc\tref\tpsg
+  vector<string> ref_tok;  // tokenized reference
+  vector<WordID> ref_ids;  // reference as vector of WordID
+  string grammar_str;
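+  // One input line per sentence, tab-separated, with the per-sentence grammar
+  // inlined and rules joined by the literal token __NEXT__RULE__ (illustrative
+  // line with an invented rule):
+  //   id<TAB>source<TAB>reference<TAB>[X] ||| quelle ||| source ||| logp=0 __NEXT__RULE__ ...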
+
+  // buffer input for t > 0
+  vector<string> src_str_buf;          // source strings, TODO? memory
+  vector<vector<WordID> > ref_ids_buf; // references as WordID vecs
+  filtering_ostream grammar_buf;       // written to compressed file in /tmp
+  // this is for writing the grammar buffer file
+  grammar_buf.push( gzip_compressor() );
+  char grammar_buf_tmp_fn[] = DTRAIN_TMP_DIR"/dtrain-grammars-XXXXXX";
+  mkstemp( grammar_buf_tmp_fn );
+  grammar_buf.push( file_sink(grammar_buf_tmp_fn, ios::binary | ios::trunc) );
+
+  size_t sid = 0, in_sz = 99999999; // sentence id, input size
+  double acc_1best_score = 0., acc_1best_model = 0.;
+  vector<vector<double> > scores_per_iter;
+  double max_score = 0.;
+  size_t best_t = 0;
+  bool next = false, stop = false;
+  double score = 0.;
+  size_t cand_len = 0;
+  double overall_time = 0.;
+
+  // for the perceptron/SVM; TODO as params
+  double eta = 0.0005;
+  double gamma = 0.; //01; // -> SVM
+  lambdas.add_value( FD::Convert("__bias"), 0 );
+
+  // for random sampling
+  srand ( time(NULL) );
+
+
+  for ( size_t t = 0; t < T; t++ ) // T epochs
+  {
+
+  time_t start, end;
+  time( &start );
+
+  // actually we only need this if t > 0 FIXME
+  ifstream grammar_file( grammar_buf_tmp_fn, ios_base::in | ios_base::binary );
+  filtering_istream grammar_buf_in;
+  grammar_buf_in.push( gzip_decompressor() );
+  grammar_buf_in.push( grammar_file );
+
+  // reset average scores
+  acc_1best_score = acc_1best_model = 0.;
+
+  // reset sentence counter
+  sid = 0;
+
+  if ( !quiet ) cout << "Iteration #" << t+1 << " of " << T << "." << endl;
+
+  while( true )
+  {
+
+    // get input from stdin or file
+    in.clear();
+    next = stop = false; // next iteration, premature stop
+    if ( t == 0 ) {
+      if ( input_fn == "-" ) {
+        if ( !getline(cin, in) ) next = true;
+      } else {
+        if ( !getline(input, in) ) next = true;
+      }
+    } else {
+      if ( sid == in_sz ) next = true; // stop if we reach the end of our input
+    }
+    // stop after X sentences (but still iterate for those)
+    if ( stop_after > 0 && stop_after == sid && !next ) stop = true;
+
+    // produce some pretty output
+    if ( !quiet && !verbose ) {
+      if ( sid == 0 ) cout << " ";
+      if ( (sid+1) % (DTRAIN_DOTS) == 0 ) {
+        cout << ".";
+        cout.flush();
+      }
+      if ( (sid+1) % (20*DTRAIN_DOTS) == 0 ) {
+        cout << " " << sid+1 << endl;
+        if ( !next && !stop ) cout << " ";
+      }
+      if ( stop ) {
+        if ( sid % (20*DTRAIN_DOTS) != 0 ) cout << " " << sid << endl;
+        cout << "Stopping after " << stop_after << " input sentences." << endl;
+      } else {
+        if ( next ) {
+          if ( sid % (20*DTRAIN_DOTS) != 0 ) {
+            cout << " " << sid << endl;
+          }
+        }
+      }
+    }
+
+    // next iteration
+    if ( next || stop ) break;
+
+    // weights
+    dense_weights.clear();
+    weights.InitFromVector( lambdas );
+    weights.InitVector( &dense_weights );
+    decoder.SetWeights( dense_weights );
+
+    if ( t == 0 ) {
+      // handling input
+      in_split.clear();
+      boost::split( in_split, in, boost::is_any_of("\t") ); // in_split[0] is id
+      // getting reference
+      ref_tok.clear(); ref_ids.clear();
+      boost::split( ref_tok, in_split[2], boost::is_any_of(" ") );
+      register_and_convert( ref_tok, ref_ids );
+      ref_ids_buf.push_back( ref_ids );
+      // process and set grammar
+      bool broken_grammar = true;
+      for ( string::iterator ti = in_split[3].begin(); ti != in_split[3].end(); ti++ ) {
+        if ( !isspace(*ti) ) {
+          broken_grammar = false;
+          break;
+        }
+      }
+      if ( broken_grammar ) continue;
+      grammar_str = boost::replace_all_copy( in_split[3], " __NEXT__RULE__ ", "\n" ) + "\n"; // FIXME copy, __
+      grammar_buf << grammar_str << DTRAIN_GRAMMAR_DELIM << endl;
+      decoder.SetSentenceGrammarFromString( grammar_str );
+      // decode, kbest
+      src_str_buf.push_back( in_split[1] );
+      decoder.Decode( in_split[1], &observer );
+    } else {
+      // get buffered grammar
+      grammar_str.clear();
+      int i = 1;
+      while ( true ) {
+        string g;
+        getline( grammar_buf_in, g );
+        if ( g == DTRAIN_GRAMMAR_DELIM ) break;
+        grammar_str += g+"\n";
+        i += 1;
+      }
+      decoder.SetSentenceGrammarFromString( grammar_str );
+      // decode, kbest
+      decoder.Decode( src_str_buf[sid], &observer );
+    }
+
+    // get kbest list
+    KBestList* kb;
+    //if ( ) { // TODO get from forest
+    kb = observer.GetKBest();
+    //}
+
+    // scoring kbest
+    if ( t > 0 ) ref_ids = ref_ids_buf[sid];
+    for ( size_t i = 0; i < kb->GetSize(); i++ ) {
+      NgramCounts counts = make_ngram_counts( ref_ids, kb->sents[i], N );
+      // this is for approx bleu
+      if ( scorer_str == "approx_bleu" ) {
+        if ( i == 0 ) { // 'context of 1best translations'
+          global_counts  += counts;
+          global_hyp_len += kb->sents[i].size();
+          global_ref_len += ref_ids.size();
+          counts.reset();
+          cand_len = 0;
+        } else {
+          cand_len = kb->sents[i].size();
+        }
+        NgramCounts counts_tmp = global_counts + counts;
+        // TODO as param
+        score = 0.9 * scorer( counts_tmp,
+                              global_ref_len,
+                              global_hyp_len + cand_len, N, bleu_weights );
+      } else {
+        // other scorers
+        cand_len = kb->sents[i].size();
+        score = scorer( counts,
+                        ref_ids.size(),
+                        kb->sents[i].size(), N, bleu_weights );
+      }
+
+      kb->scores.push_back( score );
+
+      if ( i == 0 ) {
+        acc_1best_score += score;
+        acc_1best_model += kb->model_scores[i];
+      }
+
+      if ( verbose ) {
+        if ( i == 0 ) cout << "'" << TD::GetString( ref_ids ) << "' [ref]" << endl;
+        cout << _prec5 << _nopos << "[hyp " << i << "] " << "'" << TD::GetString( kb->sents[i] ) << "'";
+        cout << " [SCORE=" << score << ",model=" << kb->model_scores[i] << "]" << endl;
+        cout << kb->feats[i] << endl; // this is maybe too verbose
+      }
+    } // Nbest loop
+
+    if ( verbose ) cout << endl;
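+    // The update below works on the pairs kept by sample_all(): whenever the
+    // model ranks the metric-worse hypothesis (fear) above the metric-better
+    // one (hope), the pair's feature difference becomes a constraint. In the
+    // perceptron case this is the classic pairwise update (sketch):
+    //   lambdas += eta * ( f(hope) - f(fear) )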
+
+    // UPDATE WEIGHTS
+    if ( !noup ) {
+
+      TrainingInstances pairs;
+      sample_all( kb, pairs, n_pairs );
+
+      vector< SparseVector<double> > featureValueDiffs;
+      vector<double> lossMinusModelScoreDiffs;
+      for ( TrainingInstances::iterator ti = pairs.begin();
+            ti != pairs.end(); ti++ ) {
+
+        SparseVector<double> dv;
+        if ( ti->first_score - ti->second_score < 0 ) {
+          dv = ti->second - ti->first;
+          dv.add_value( FD::Convert("__bias"), -1 );
+
+          featureValueDiffs.push_back(dv);
+          double lossMinusModelScoreDiff = ti->loss_diff - ti->model_score_diff;
+          lossMinusModelScoreDiffs.push_back(lossMinusModelScoreDiff);
+
+          if (update_type == "perceptron") {
+            lambdas += dv * eta;
+            cerr << "after perceptron update: " << lambdas << endl << endl;
+          }
+
+          if ( verbose ) {
+            cout << "{{ f(" << ti->first_rank << ") > f(" << ti->second_rank << ") but g(i)=" << ti->first_score << " < g(j)=" << ti->second_score << " so update" << endl;
+            cout << " i  " << TD::GetString(kb->sents[ti->first_rank]) << endl;
+            cout << "    " << kb->feats[ti->first_rank] << endl;
+            cout << " j  " << TD::GetString(kb->sents[ti->second_rank]) << endl;
+            cout << "    " << kb->feats[ti->second_rank] << endl;
+            cout << " diff vec: " << dv << endl;
+            cout << " lambdas after update: " << lambdas << endl;
+            cout << "}}" << endl;
+          }
+        } else {
+          //SparseVector<double> reg;
+          //reg = lambdas * ( 2 * gamma );
+          //lambdas += reg * ( -eta );
+        }
+      }
+      cerr << "Collected " << featureValueDiffs.size() << " constraints." << endl;
+
+      double slack = 0.01;
+      if (update_type == "mira") {
+        if (featureValueDiffs.size() > 0) {
+          vector<double> alphas;
+          if (slack != 0) {
+            alphas = Mira::Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiffs, slack);
+          } else {
+            alphas = Mira::Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiffs);
+          }
+
+          for (size_t k = 0; k < featureValueDiffs.size(); ++k) {
+            lambdas += featureValueDiffs[k] * alphas[k];
+          }
+          // cerr << "after mira update: " << lambdas << endl << endl;
+        }
+      }
+    }
+
+    ++sid;
+
+  } // input loop
+
+  if ( t == 0 ) in_sz = sid; // remember size (lines) of input
+
+  // print some stats
+  double avg_1best_score = acc_1best_score/(double)in_sz;
+  double avg_1best_model = acc_1best_model/(double)in_sz;
+  double avg_1best_score_diff, avg_1best_model_diff;
+  if ( t > 0 ) {
+    avg_1best_score_diff = avg_1best_score - scores_per_iter[t-1][0];
+    avg_1best_model_diff = avg_1best_model - scores_per_iter[t-1][1];
+  } else {
+    avg_1best_score_diff = avg_1best_score;
+    avg_1best_model_diff = avg_1best_model;
+  }
+  cout << _prec5 << _pos << "WEIGHTS" << endl;
+  for (vector<string>::iterator it = wprint.begin(); it != wprint.end(); it++) {
+    cout << setw(16) << *it << " = " << dense_weights[FD::Convert( *it )] << endl;
+  }
+
+  cout << "        ---" << endl;
+  cout << _nopos << "      avg score: " << avg_1best_score;
+  cout << _pos << " (" << avg_1best_score_diff << ")" << endl;
+  cout << _nopos << "avg model score: " << avg_1best_model;
+  cout << _pos << " (" << avg_1best_model_diff << ")" << endl;
+  vector<double> remember_scores;
+  remember_scores.push_back( avg_1best_score );
+  remember_scores.push_back( avg_1best_model );
+  scores_per_iter.push_back( remember_scores );
+  if ( avg_1best_score > max_score ) {
+    max_score = avg_1best_score;
+    best_t = t;
+  }
+
+  // close open files
+  if ( input_fn != "-" ) input.close();
+  close( grammar_buf );
+  grammar_file.close();
+
+  time( &end );
+  double time_dif = difftime( end, start );
+  overall_time += time_dif;
+  if ( !quiet ) {
+    cout << _prec2 << _nopos << "(time " << time_dif/60. << " min, ";
+    cout << time_dif/(double)in_sz << " s/S)" << endl;
+  }
+
+  if ( t+1 != T ) cout << endl;
+
+  if ( noup ) break;
+
+  // write weights after every epoch
+  std::string s;
+  std::stringstream out;
+  out << t;
+  s = out.str();
+  string weights_file = output_file + "." + s;
+  weights.WriteToFile( weights_file, true );
+
+  } // outer loop
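+  // For orientation: with update-type=mira, the loop above solves the usual
+  // margin-constrained QP via Mira::Hildreth::optimise (a sketch, not a
+  // verbatim statement from the paper):
+  //   min_dw  1/2 ||dw||^2 + C * sum_k xi_k
+  //   s.t.    <dw, d_k> >= b_k - xi_k,   xi_k >= 0
+  // with d_k = featureValueDiffs[k], b_k = lossMinusModelScoreDiffs[k] and
+  // C = slack (0.01 above); the returned alphas are the dual variables, so
+  // the applied update is dw = sum_k alphas[k] * d_k.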
+
+  unlink( grammar_buf_tmp_fn );
+  if ( !noup ) {
+    if ( !quiet ) cout << endl << "writing weights file '" << cfg["output"].as<string>() << "' ...";
+    weights.WriteToFile( cfg["output"].as<string>(), true );
+    if ( !quiet ) cout << "done" << endl;
+  }
+
+  if ( !quiet ) {
+    cout << _prec5 << _nopos << endl << "---" << endl << "Best iteration: ";
+    cout << best_t+1 << " [SCORE '" << scorer_str << "'=" << max_score << "]." << endl;
+    cout << _prec2 << "This took " << overall_time/60. << " min." << endl;
+  }
+
+  return 0;
+}
+
diff --git a/dtrain/test/mtm11/mira_update/sample.h b/dtrain/test/mtm11/mira_update/sample.h
new file mode 100644
index 00000000..5c331bba
--- /dev/null
+++ b/dtrain/test/mtm11/mira_update/sample.h
@@ -0,0 +1,101 @@
+#ifndef _DTRAIN_SAMPLE_H_
+#define _DTRAIN_SAMPLE_H_
+
+
+#include "kbestget.h"
+
+
+namespace dtrain
+{
+
+
+struct TPair
+{
+  SparseVector<double> first, second;
+  size_t first_rank, second_rank;
+  double first_score, second_score;
+  double model_score_diff;
+  double loss_diff;
+};
+
+typedef vector<TPair> TrainingInstances;
+
+
+void
+sample_all( KBestList* kb, TrainingInstances &training, size_t n_pairs )
+{
+  std::vector<double> loss_diffs;
+  TrainingInstances training_tmp;
+  for ( size_t i = 0; i < kb->GetSize()-1; i++ ) {
+    for ( size_t j = i+1; j < kb->GetSize(); j++ ) {
+      TPair p;
+      p.first = kb->feats[i];
+      p.second = kb->feats[j];
+      p.first_rank = i;
+      p.second_rank = j;
+      p.first_score = kb->scores[i];
+      p.second_score = kb->scores[j];
+
+      bool conservative = true;
+      if ( kb->scores[i] - kb->scores[j] < 0 ) {
+        // j=hope, i=fear
+        p.model_score_diff = kb->model_scores[j] - kb->model_scores[i];
+        p.loss_diff = kb->scores[j] - kb->scores[i];
+        training_tmp.push_back(p);
+        loss_diffs.push_back(p.loss_diff);
+      }
+      else if (!conservative) {
+        // i=hope, j=fear
+        p.model_score_diff = kb->model_scores[i] - kb->model_scores[j];
+        p.loss_diff = kb->scores[i] - kb->scores[j];
+        training_tmp.push_back(p);
+        loss_diffs.push_back(p.loss_diff);
+      }
+    }
+  }
+
+  if (training_tmp.size() > 0) {
+    double threshold;
+    std::sort(loss_diffs.begin(), loss_diffs.end());
+    std::reverse(loss_diffs.begin(), loss_diffs.end());
+    threshold = loss_diffs.size() >= n_pairs ? loss_diffs[n_pairs-1] : loss_diffs[loss_diffs.size()-1];
+    cerr << "threshold: " << threshold << endl;
+    size_t constraints = 0;
+    for (size_t i = 0; (i < training_tmp.size() && constraints < n_pairs); ++i) {
+      if (training_tmp[i].loss_diff >= threshold) {
+        training.push_back(training_tmp[i]);
+        constraints++;
+      }
+    }
+  }
+  else {
+    cerr << "No pairs selected." << endl;
+  }
+}
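+
+// Toy illustration of the threshold logic above (numbers invented): with
+// loss_diffs sorted descending to {0.9, 0.5, 0.4, 0.1} and n_pairs = 2,
+// threshold = loss_diffs[1] = 0.5, so exactly the two pairs with
+// loss_diff >= 0.5 are kept as constraints for the update.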
+
+void
+sample_rand( KBestList* kb, TrainingInstances &training )
+{
+  srand( time(NULL) );
+  for ( size_t i = 0; i < kb->GetSize()-1; i++ ) {
+    for ( size_t j = i+1; j < kb->GetSize(); j++ ) {
+      if ( rand() % 2 ) {
+        TPair p;
+        p.first = kb->feats[i];
+        p.second = kb->feats[j];
+        p.first_rank = i;
+        p.second_rank = j;
+        p.first_score = kb->scores[i];
+        p.second_score = kb->scores[j];
+        training.push_back( p );
+      }
+    }
+  }
+}
+
+
+} // namespace
+
+
+#endif
+
diff --git a/dtrain/test/test.in b/dtrain/test/test.in
deleted file mode 100644
index 4f53335e..00000000
--- a/dtrain/test/test.in
+++ /dev/null
@@ -1,3 +0,0 @@
-0 vorrichtung means [X] ||| vorrichtung ||| apparatus ||| LogP=0 ||| 0-0 __NEXT_RULE__ [X] ||| vorrichtung ||| means ||| LogP=-100 ||| 0-0
-1 Test test [X] ||| Test ||| test ||| LogP=0 ||| 0-0 __NEXT_RULE__ [X] ||| Test ||| xxx ||| LogP=-100 ||| 0-0
-2 kaputt broken
diff --git a/dtrain/test/toy/dtrain.ini b/dtrain/test/toy/dtrain.ini
index 3548bbb6..abf22b94 100644
--- a/dtrain/test/toy/dtrain.ini
+++ b/dtrain/test/toy/dtrain.ini
@@ -1,11 +1,12 @@
 decoder_config=test/toy/cdec.ini
-input=test/toy/in
+input=test/toy/input
 output=-
-print_weights=logp use_shell use_house PassThrough
-
+print_weights=logp shell_rule house_rule small_rule little_rule PassThrough
 k=4
-N=3
-epochs=2
+N=4
+epochs=3
 scorer=stupid_bleu
 sample_from=kbest
 filter=uniq
+pair_sampling=all
+learning_rate=1
diff --git a/dtrain/test/toy/in b/dtrain/test/toy/in
deleted file mode 100644
index d7b7d080..00000000
--- a/dtrain/test/toy/in
+++ /dev/null
@@ -1,2 +0,0 @@
-0 ich sah ein kleines haus i saw a little house [S] ||| [NP,1] [VP,2] ||| [1] [2] ||| logp=0 [NP] ||| ich ||| i ||| logp=0 [NP] ||| ein [NN,1] ||| a [1] ||| logp=0 [NN] ||| [JJ,1] haus ||| [1] house ||| logp=0 use_house=1 [NN] ||| [JJ,1] haus ||| [1] shell ||| logp=0 use_shell=1 [JJ] ||| kleines ||| small ||| logp=0 [JJ] ||| kleines ||| little ||| logp=0 [JJ] ||| grosses ||| big ||| logp=0 [JJ] ||| grosses ||| large ||| logp=0 [VP] ||| [V,1] [NP,2] ||| [1] [2] ||| logp=0 [V] ||| sah ||| saw ||| logp=0 [V] ||| fand ||| found ||| logp=0
-1 ich fand ein grosses haus i found a large house [S] ||| [NP,1] [VP,2] ||| [1] [2] ||| logp=0 [NP] ||| ich ||| i ||| logp=0 [NP] ||| ein [NN,1] ||| a [1] ||| logp=0 [NN] ||| [JJ,1] haus ||| [1] house ||| logp=0 use_house=1 [NN] ||| [JJ,1] haus ||| [1] shell ||| logp=0 use_shell=1 [JJ] ||| kleines ||| small ||| logp=0 [JJ] ||| kleines ||| little ||| logp=0 [JJ] ||| grosses ||| big ||| logp=0 [JJ] ||| grosses ||| large ||| logp=0 [VP] ||| [V,1] [NP,2] ||| [1] [2] ||| logp=0 [V] ||| sah ||| saw ||| logp=0 [V] ||| fand ||| found ||| logp=0
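With the input file renamed and the toy grammar switched to per-rule identifier features, the toy example can presumably be run from the dtrain/ directory as follows (illustrative invocation; -c is the config switch defined above):

<pre>
./dtrain -c test/toy/dtrain.ini
</pre>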
diff --git a/dtrain/test/toy/input b/dtrain/test/toy/input
new file mode 100644
index 00000000..4d10a9ea
--- /dev/null
+++ b/dtrain/test/toy/input
@@ -0,0 +1,2 @@
+0 ich sah ein kleines haus i saw a little house [S] ||| [NP,1] [VP,2] ||| [1] [2] ||| logp=0 [NP] ||| ich ||| i ||| logp=0 [NP] ||| ein [NN,1] ||| a [1] ||| logp=0 [NN] ||| [JJ,1] haus ||| [1] house ||| logp=0 house_rule=1 [NN] ||| [JJ,1] haus ||| [1] shell ||| logp=0 shell_rule=1 [JJ] ||| kleines ||| small ||| logp=0 small_rule=1 [JJ] ||| kleines ||| little ||| logp=0 little_rule=1 [JJ] ||| grosses ||| big ||| logp=0 [JJ] ||| grosses ||| large ||| logp=0 [VP] ||| [V,1] [NP,2] ||| [1] [2] ||| logp=0 [V] ||| sah ||| saw ||| logp=0 [V] ||| fand ||| found ||| logp=0
+1 ich fand ein kleines haus i found a little house [S] ||| [NP,1] [VP,2] ||| [1] [2] ||| logp=0 [NP] ||| ich ||| i ||| logp=0 [NP] ||| ein [NN,1] ||| a [1] ||| logp=0 [NN] ||| [JJ,1] haus ||| [1] house ||| logp=0 house_rule=1 [NN] ||| [JJ,1] haus ||| [1] shell ||| logp=0 shell_rule=1 [JJ] ||| kleines ||| small ||| logp=0 small_rule=1 [JJ] ||| kleines ||| little ||| logp=0 little_rule=1 [JJ] ||| grosses ||| big ||| logp=0 [JJ] ||| grosses ||| large ||| logp=0 [VP] ||| [V,1] [NP,2] ||| [1] [2] ||| logp=0 [V] ||| sah ||| saw ||| logp=0 [V] ||| fand ||| found ||| logp=0
-- 
cgit v1.2.3