 decoder/1dev.ur                               |    1 -
 decoder/apply_fsa_models.README               |   21 -
 decoder/cdec-gz.ini                           |    7 -
 decoder/cdec-nolm-tuned.ini                   |    7 -
 decoder/decode.sh                             |   10 -
 decoder/do.tests.sh                           |    1 -
 decoder/fsa-decode.sh                         |    3 -
 decoder/fsa-hiero.ini                         |    5 -
 decoder/fsa.ini                               |    2 -
 decoder/glue-lda.scfg                         |    8 -
 decoder/grammar.hiero                         |  151 -
 decoder/perro.sh                              |    1 -
 decoder/perro.ur                              |    1 -
 decoder/short.ur                              |    1 -
 decoder/weights-fsa                           |   14 -
 decoder/weights.hiero                         |   10 -
 dpmert/tac.pl                                 |    8 -
 expLog                                        |   60 -
 graehl/NOTES                                  |   18 -
 graehl/NOTES.beam                             |   29 -
 graehl/NOTES.earley                           |  111 -
 graehl/NOTES.lm.phrase                        |  180 -
 graehl/NOTES.partial.binarize                 |   21 -
 graehl/NOTES.wfsa                             |   16 -
 rescore/cdec_kbest_to_zmert.pl                |   64 -
 rescore/example/README                        |    4 -
 rescore/example/cdec.ini                      |    2 -
 rescore/example/hyp.txt                       |    5 -
 rescore/example/small.scfg                    |    9 -
 rescore/example/source.txt                    |    2 -
 rescore/example/weights                       |    1 -
 rescore/generate_zmert_params_from_weights.pl |   26 -
 rescore/rerank.pl                             |   86 -
 rescore/rescore_inv_model1.pl                 |  126 -
 rescore/rescore_with_cdec_model.pl            |  121 -
 35 files changed, 0 insertions(+), 1132 deletions(-)
diff --git a/decoder/1dev.ur b/decoder/1dev.ur
deleted file mode 100755
index adeaa101..00000000
--- a/decoder/1dev.ur
+++ /dev/null
@@ -1 +0,0 @@
-krAcy ( AstRAf rpwrtRr ) krAcy myN pyr kw mxtlf HAdvAt myN xAtwn smyt 4 AfrAd hlAk hw gyY jbkh smndr sY Ayk $xS ky lA$ mly .
diff --git a/decoder/apply_fsa_models.README b/decoder/apply_fsa_models.README
deleted file mode 100755
index 7e116a62..00000000
--- a/decoder/apply_fsa_models.README
+++ /dev/null
@@ -1,21 +0,0 @@
-trie root and trie lhs2[lhs-nodeid] -> trie node
-
-trie node edges (adj) - list of w,dest,p. dest==0 means it's a completed rule (note: p is redundant with node e.dest->p-p, except in case of dest=0). we will also use null_wordid (max_int) for dest=0 edges, but that doesn't matter
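
A minimal C++ sketch of this trie layout (all names here are illustrative assumptions, not cdec's actual types):

    // Rule trie as described above: per-node adjacency list of (w, dest, p)
    // edges; dest == nullptr plays the role of dest=0 (completed rule), and
    // null_wordid (max int) is used for the word on completion edges.
    #include <limits>
    #include <unordered_map>
    #include <vector>

    using WordID = unsigned;
    const WordID null_wordid = std::numeric_limits<WordID>::max();

    struct TrieNode;

    struct TrieEdge {
      WordID w;        // next rhs symbol; null_wordid on dest=0 edges
      TrieNode* dest;  // nullptr => this edge completes a rule
      double p;        // redundant with dest's cost except on completion edges
    };

    struct TrieNode {
      std::vector<TrieEdge> adj;  // iterated over and scored against the fsa
      double p;                   // cost carried by the node (see redundancy note)
    };

    // trie root per LHS: lhs2[lhs-nodeid] -> trie node, as above.
    std::unordered_map<int, TrieNode*> lhs2;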
-
-we intersect by iterating over adj and scoring w/ fsa. TODO: index for sparse fsa; for now we assume smoothed ngram fsa where all items are scorable.
-
-predicted items: we don't make copies of the pending predictions as we scan toward completion; instead, item backpointers are followed until the prediction (where backpointer=0). such backpointer=0 items have a queue of prediction-originating items.
-
-reusing completed items using a lookup on pair [NT,a] -> all [NT,a,b] lazy best-first. b-next (right state) index in lazy index.
-
-perhaps predictors need to register the # of items they have already mated with. (b-next index)
-
-comb-like (cube) t-next (position in trie node edge list), b-next? or just check chart and don't redup. depends on whether we want just 1best or kbest deriv - diff. ways of reaching same result are good in kbest.
-
-types of chart items:
-
-A->t.*,a,b (trie node t) with mutable state t-next for generating successor lazily (vs. all at once)
-
-A->t.B,a,b (t-next of A->t.* points to (B,t')): mutable state b-next for choosing which B->b,? to use. note: such an item can't be queued immediately on its own, but can be added to the pending list of B->b,? ; once any B->b,? is completed then we see if any more b-next are already known; if they're exhausted then we add back to pending list?
-
-A->a,? - list of all known (b,inside prob) such that A[a,b]. we may also choose to represent this as A->.*,a,a.
diff --git a/decoder/cdec-gz.ini b/decoder/cdec-gz.ini
deleted file mode 100755
index f9b15420..00000000
--- a/decoder/cdec-gz.ini
+++ /dev/null
@@ -1,7 +0,0 @@
-cubepruning_pop_limit=200
-feature_function=WordPenalty
-feature_function=ArityPenalty
-add_pass_through_rules=true
-formalism=scfg
-grammar=mt09.grammar.gz
-weights=weights.tune.nolm
diff --git a/decoder/cdec-nolm-tuned.ini b/decoder/cdec-nolm-tuned.ini
deleted file mode 100755
index 5ebab747..00000000
--- a/decoder/cdec-nolm-tuned.ini
+++ /dev/null
@@ -1,7 +0,0 @@
-cubepruning_pop_limit=200
-feature_function=WordPenalty
-feature_function=ArityPenalty
-add_pass_through_rules=true
-formalism=scfg
-grammar=mt09.grammar
-weights=weights.tune.nolm
diff --git a/decoder/decode.sh b/decoder/decode.sh
deleted file mode 100755
index 677e64ad..00000000
--- a/decoder/decode.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-d=$(dirname `readlink -f $0`)/
-decode() {
-if [ "$lm" ] ; then
- lmargs0=-F
- lmargs1="LanguageModel lm.gz -n LM"
-fi
-set -x
-$gdb ${cdec:=$d/cdec} -c $d/${cfg:=cdec-fsa}.ini -i $d/${in:=1dev.ur} $lmargs0 "$lmargs1" --show_features --show_config --show_weights "$@"
-set +x
-}
diff --git a/decoder/do.tests.sh b/decoder/do.tests.sh
deleted file mode 100755
index b3ddeb18..00000000
--- a/decoder/do.tests.sh
+++ /dev/null
@@ -1 +0,0 @@
-for f in *_test; do ./$f; done
diff --git a/decoder/fsa-decode.sh b/decoder/fsa-decode.sh
deleted file mode 100755
index 66879523..00000000
--- a/decoder/fsa-decode.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-d=$(dirname `readlink -f $0`)/
-. $d/decode.sh
-in=1dev.ur cfg=cdec-fsa decode
diff --git a/decoder/fsa-hiero.ini b/decoder/fsa-hiero.ini
deleted file mode 100755
index 7c7d0347..00000000
--- a/decoder/fsa-hiero.ini
+++ /dev/null
@@ -1,5 +0,0 @@
-formalism=scfg
-scfg_extra_glue_grammar=glue-lda.scfg
-grammar=grammar.hiero
-show_tree_structure=true
-weights=weights.hiero
diff --git a/decoder/fsa.ini b/decoder/fsa.ini
deleted file mode 100755
index 571a2e34..00000000
--- a/decoder/fsa.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-feature_function=ShorterThanPrev
-feature_function=LongerThanPrev
diff --git a/decoder/glue-lda.scfg b/decoder/glue-lda.scfg
deleted file mode 100755
index 27489817..00000000
--- a/decoder/glue-lda.scfg
+++ /dev/null
@@ -1,8 +0,0 @@
-[S] ||| [S,1] [X0,2] ||| [1] [2] ||| Glue=1
-[S] ||| [X0,1] ||| [1] ||| GlueTop=1
-[S] ||| [S,1] [X1,2] ||| [1] [2] ||| Glue=1
-[S] ||| [X1,1] ||| [1] ||| GlueTop=1
-[S] ||| [S,1] [X2,2] ||| [1] [2] ||| Glue=1
-[S] ||| [X2,1] ||| [1] ||| GlueTop=1
-[S] ||| [S,1] [X3,2] ||| [1] [2] ||| Glue=1
-[S] ||| [X3,1] ||| [1] ||| GlueTop=1
diff --git a/decoder/grammar.hiero b/decoder/grammar.hiero
deleted file mode 100755
index 79adf33a..00000000
--- a/decoder/grammar.hiero
+++ /dev/null
@@ -1,151 +0,0 @@
-[X] ||| . ||| . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] . ||| [1] . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] anciano ||| [1] old man ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=1.38629
-[X] ||| [X,1] anciano . ||| [1] old man . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=1.38629
-[X] ||| [X,1] anciano [X,2] ||| [1] old man [2] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=1.38629
-[X] ||| [X,1] feo ||| ugly [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] feo . ||| ugly [1] . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] feo [X,2] ||| ugly [1] [2] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] gato ||| [1] cat ||| EgivenF=0.405465 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] gato . ||| [1] cat . ||| EgivenF=0.405465 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] gato [X,2] ||| [1] [2] cat ||| EgivenF=0 FgivenE=1.09861 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] gato [X,2] ||| [1] cat [2] ||| EgivenF=0 FgivenE=0.405465 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] gato [X,2] . ||| [1] [2] cat . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] gato negro ||| [1] black cat ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] gato negro . ||| [1] black cat . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] gato negro [X,2] ||| [1] black cat [2] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] grande ||| big [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] grande . ||| big [1] . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] grande [X,2] ||| big [1] [2] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] negro ||| black [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] negro . ||| black [1] . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] negro [X,2] ||| black [1] [2] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] oruga ||| [1] caterpiller ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] oruga . ||| [1] caterpiller . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] oruga [X,2] ||| [1] caterpiller [2] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] patito [X,2] ||| [1] [2] duckling ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] patito [X,2] . ||| [1] [2] duckling . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] patito feo ||| [1] ugly duckling ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] patito feo . ||| [1] ugly duckling . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] patito feo [X,2] ||| [1] ugly duckling [2] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] peces ||| [1] fish ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] peces . ||| [1] fish . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] peces [X,2] ||| [1] fish [2] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] perro ||| [1] dog ||| EgivenF=0.405465 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] perro . ||| [1] dog . ||| EgivenF=0.405465 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] perro [X,2] ||| [1] dog [2] ||| EgivenF=0 FgivenE=0.405465 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] perro [X,2] ||| [1] [2] dog ||| EgivenF=0 FgivenE=1.09861 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] perro [X,2] . ||| [1] [2] dog . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] perro grande ||| [1] big dog ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] perro grande . ||| [1] big dog . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] perro grande [X,2] ||| [1] big dog [2] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] pájaro [X,2] ||| [1] [2] bird ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] pájaro [X,2] . ||| [1] [2] bird . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] pájaro negro ||| [1] black bird ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] pájaro negro . ||| [1] black bird . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| [X,1] pájaro negro [X,2] ||| [1] black bird [2] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| anciano ||| old man ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=1.38629
-[X] ||| anciano . ||| old man . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=1.38629
-[X] ||| anciano [X,1] ||| old man [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=1.38629
-[X] ||| el ||| the ||| EgivenF=0.287682 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el [X,1] ||| the [1] ||| EgivenF=0.287682 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el [X,1] . ||| the [1] . ||| EgivenF=0.287682 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el [X,1] feo ||| the ugly [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el [X,1] feo . ||| the ugly [1] . ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el [X,1] feo [X,2] ||| the ugly [1] [2] ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el [X,1] grande ||| the big [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el [X,1] grande . ||| the big [1] . ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el [X,1] grande [X,2] ||| the big [1] [2] ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el [X,1] negro ||| the black [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el [X,1] negro . ||| the black [1] . ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el [X,1] negro [X,2] ||| the black [1] [2] ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el gato ||| the cat ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el gato . ||| the cat . ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el gato [X,1] ||| the [1] cat ||| EgivenF=0 FgivenE=0.693147 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el gato [X,1] ||| the cat [1] ||| EgivenF=0 FgivenE=0.693147 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el gato [X,1] . ||| the [1] cat . ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el gato negro ||| the black cat ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el gato negro . ||| the black cat . ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el gato negro [X,1] ||| the black cat [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el patito [X,1] ||| the [1] duckling ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el patito [X,1] . ||| the [1] duckling . ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el patito feo ||| the ugly duckling ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el patito feo . ||| the ugly duckling . ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el patito feo [X,1] ||| the ugly duckling [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el perro ||| the dog ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el perro . ||| the dog . ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el perro [X,1] ||| the [1] dog ||| EgivenF=0 FgivenE=0.693147 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el perro [X,1] ||| the dog [1] ||| EgivenF=0 FgivenE=0.693147 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el perro [X,1] . ||| the [1] dog . ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el perro grande ||| the big dog ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el perro grande . ||| the big dog . ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el perro grande [X,1] ||| the big dog [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el pájaro [X,1] ||| the [1] bird ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el pájaro [X,1] . ||| the [1] bird . ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el pájaro negro ||| the black bird ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el pájaro negro . ||| the black bird . ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| el pájaro negro [X,1] ||| the black bird [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0.287682 LexFgivenE=0
-[X] ||| eso ||| that ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| eso [X,1] ||| that [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| eso [X,1] . ||| that [1] . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| eso perro ||| that dog ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| eso perro . ||| that dog . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| eso perro [X,1] ||| that dog [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| este ||| this ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| este [X,1] ||| this [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| este [X,1] . ||| this [1] . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| este anciano ||| this old man ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=1.38629
-[X] ||| este anciano . ||| this old man . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=1.38629
-[X] ||| este anciano [X,1] ||| this old man [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=1.38629
-[X] ||| este gato ||| this cat ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| este gato . ||| this cat . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| este gato [X,1] ||| this cat [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| feo ||| ugly ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| gato ||| cat ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| gato . ||| cat . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| gato [X,1] ||| [1] cat ||| EgivenF=1.09861 FgivenE=1.09861 LexEgivenF=0 LexFgivenE=0
-[X] ||| gato [X,1] ||| cat [1] ||| EgivenF=0 FgivenE=0.405465 LexEgivenF=0 LexFgivenE=0
-[X] ||| gato [X,1] . ||| [1] cat . ||| EgivenF=1.09861 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| gato negro ||| black cat ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| gato negro . ||| black cat . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| gato negro [X,1] ||| black cat [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| grande ||| big ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| la ||| the ||| EgivenF=2.07944 FgivenE=0 LexEgivenF=2.07944 LexFgivenE=0
-[X] ||| la [X,1] ||| the [1] ||| EgivenF=2.07944 FgivenE=0 LexEgivenF=2.07944 LexFgivenE=0
-[X] ||| la [X,1] . ||| the [1] . ||| EgivenF=2.07944 FgivenE=0 LexEgivenF=2.07944 LexFgivenE=0
-[X] ||| la oruga ||| the caterpiller ||| EgivenF=0 FgivenE=0 LexEgivenF=2.07944 LexFgivenE=0
-[X] ||| la oruga . ||| the caterpiller . ||| EgivenF=0 FgivenE=0 LexEgivenF=2.07944 LexFgivenE=0
-[X] ||| la oruga [X,1] ||| the caterpiller [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=2.07944 LexFgivenE=0
-[X] ||| los ||| the ||| EgivenF=2.07944 FgivenE=0 LexEgivenF=2.07944 LexFgivenE=0
-[X] ||| los [X,1] ||| the [1] ||| EgivenF=2.07944 FgivenE=0 LexEgivenF=2.07944 LexFgivenE=0
-[X] ||| los [X,1] . ||| the [1] . ||| EgivenF=2.07944 FgivenE=0 LexEgivenF=2.07944 LexFgivenE=0
-[X] ||| los peces ||| the fish ||| EgivenF=0 FgivenE=0 LexEgivenF=2.07944 LexFgivenE=0
-[X] ||| los peces . ||| the fish . ||| EgivenF=0 FgivenE=0 LexEgivenF=2.07944 LexFgivenE=0
-[X] ||| los peces [X,1] ||| the fish [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=2.07944 LexFgivenE=0
-[X] ||| negro ||| black ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| oruga ||| caterpiller ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| oruga . ||| caterpiller . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| oruga [X,1] ||| caterpiller [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| patito ||| duckling ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| patito [X,1] ||| [1] duckling ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| patito [X,1] . ||| [1] duckling . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| patito feo ||| ugly duckling ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| patito feo . ||| ugly duckling . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| patito feo [X,1] ||| ugly duckling [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| peces ||| fish ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| peces . ||| fish . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| peces [X,1] ||| fish [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| perro ||| dog ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| perro . ||| dog . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| perro [X,1] ||| [1] dog ||| EgivenF=1.09861 FgivenE=1.09861 LexEgivenF=0 LexFgivenE=0
-[X] ||| perro [X,1] ||| dog [1] ||| EgivenF=0 FgivenE=0.405465 LexEgivenF=0 LexFgivenE=0
-[X] ||| perro [X,1] . ||| [1] dog . ||| EgivenF=1.09861 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| perro grande ||| big dog ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| perro grande . ||| big dog . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| perro grande [X,1] ||| big dog [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| pájaro ||| bird ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| pájaro [X,1] ||| [1] bird ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| pájaro [X,1] . ||| [1] bird . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| pájaro negro ||| black bird ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| pájaro negro . ||| black bird . ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
-[X] ||| pájaro negro [X,1] ||| black bird [1] ||| EgivenF=0 FgivenE=0 LexEgivenF=0 LexFgivenE=0
diff --git a/decoder/perro.sh b/decoder/perro.sh
deleted file mode 100755
index 3e54ac71..00000000
--- a/decoder/perro.sh
+++ /dev/null
@@ -1 +0,0 @@
-$gdb $cdec "$@" -k 30 --show_features -c fsa-hiero.ini -i perro.ur
diff --git a/decoder/perro.ur b/decoder/perro.ur
deleted file mode 100755
index 6c5da6d7..00000000
--- a/decoder/perro.ur
+++ /dev/null
@@ -1 +0,0 @@
-eso perro feo
diff --git a/decoder/short.ur b/decoder/short.ur
deleted file mode 100755
index 48612801..00000000
--- a/decoder/short.ur
+++ /dev/null
@@ -1 +0,0 @@
-krAcy myN pyr kw mxtlf HAdvAt
diff --git a/decoder/weights-fsa b/decoder/weights-fsa
deleted file mode 100644
index 3cc96c2f..00000000
--- a/decoder/weights-fsa
+++ /dev/null
@@ -1,14 +0,0 @@
-Arity_0 1.70741473606976
-Arity_1 1.12426238048012
-Arity_2 1.14986187839554
-Glue -0.04589037041388
-LanguageModel 1.09051
-LM 1.09051
-PassThrough -3.66226367902928
-PhraseModel_0 -1.94633451863252
-PhraseModel_1 -0.1475347695476
-PhraseModel_2 -1.614818994946
-WordPenalty -3.0
-WordPenaltyFsa -0.56028442964748
-ShorterThanPrev -10
-LongerThanPrev -10
diff --git a/decoder/weights.hiero b/decoder/weights.hiero
deleted file mode 100755
index 6747f059..00000000
--- a/decoder/weights.hiero
+++ /dev/null
@@ -1,10 +0,0 @@
-SameFirstLetter 1
-LongerThanPrev 1
-ShorterThanPrev 1
-GlueTop 0.0
-Glue -1.0
-EgivenF -0.5
-FgivenE -0.5
-LexEgivenF -0.5
-LexFgivenE -0.5
-LM 1
diff --git a/dpmert/tac.pl b/dpmert/tac.pl
deleted file mode 100755
index 9fb525c1..00000000
--- a/dpmert/tac.pl
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/perl
-
-while(<>) {
- chomp;
- $|=1;
- print (scalar reverse($_));
- print "\n";
-}
diff --git a/expLog b/expLog
deleted file mode 100644
index 2070ac98..00000000
--- a/expLog
+++ /dev/null
@@ -1,60 +0,0 @@
-TIME MEASURES AFTER MERGE WITH cdec:
-8/July/2011
-commit ed8a6e81d87f6e917ecf
-
-./runEval
-Fri Jul 8 13:28:23 CEST 2011
-Fri Jul 8 13:30:24 CEST 2011
-Loading references (4 files)
-Loaded reference translations for 919 sentences.
-Loaded 919 references for scoring with ibm_bleu
-BLEU = 32.25, 76.5|43.1|24.3|13.9 (brev=0.993)
-0.322487
-Fri Jul 8 13:30:24 CEST 2011
-------------
-Fri Jul 8 15:04:00 CEST 2011
-Fri Jul 8 15:05:58 CEST 2011
-Time required for Cube Pruning execution: 77.61 seconds.
-------------
-Fri Jul 8 15:24:39 CEST 2011
-Fri Jul 8 15:26:36 CEST 2011
-Time required for Cube Pruning execution: 79.01 seconds.
-------------
-
-./runEvalFCP
-Fri Jul 8 13:33:17 CEST 2011
-Fri Jul 8 13:35:06 CEST 2011
-Loading references (4 files)
-Loaded reference translations for 919 sentences.
-Loaded 919 references for scoring with ibm_bleu
-BLEU = 32.39, 76.5|43.1|24.5|14.0 (brev=0.994)
-0.323857
-Fri Jul 8 13:35:07 CEST 2011
-------------
-Fri Jul 8 15:08:17 CEST 2011
-Fri Jul 8 15:10:05 CEST 2011
-Time required for Cube Pruning execution: 69.36 seconds.
-------------
-Fri Jul 8 15:21:48 CEST 2011
-Fri Jul 8 15:23:35 CEST 2011
-Time required for Cube Pruning execution: 69.71 seconds.
-------------
-
-./runEvalFCP2
-Fri Jul 8 13:53:38 CEST 2011
-Fri Jul 8 13:55:29 CEST 2011
-Loading references (4 files)
-Loaded reference translations for 919 sentences.
-Loaded 919 references for scoring with ibm_bleu
-BLEU = 32.49, 76.6|43.2|24.5|14.1 (brev=0.994)
-0.324901
-Fri Jul 8 13:55:29 CEST 2011
-------------
-Fri Jul 8 15:12:52 CEST 2011
-Fri Jul 8 15:14:42 CEST 2011
-Time required for Cube Pruning execution: 72.66 seconds.
-------------
-Fri Jul 8 15:19:13 CEST 2011
-Fri Jul 8 15:21:03 CEST 2011
-Time required for Cube Pruning execution: 72.06 seconds.
-------------
diff --git a/graehl/NOTES b/graehl/NOTES
deleted file mode 100755
index 77e99fee..00000000
--- a/graehl/NOTES
+++ /dev/null
@@ -1,18 +0,0 @@
-BUG: tune is bad - urdu conf=baseline tuning (16 dev bleu score???)
-
-conf=baseline force=1 ./tune.sh
-
- decode is good.
-
- UPDATE: maybe tuning is fine; chris never gave me a dev-corpus-filtered grammar and so a bleu of 16 may be what we always got; i just never checked. this means i need to redo tuned-first-pass experiments
-
-valgrind is ok
-
- dist-vest?
-
- (changes made to scoring? plusequals? shared_ptr? small_vector?)
-
- scorer_test is good
-
-
- line_optimer fast_score scorer
diff --git a/graehl/NOTES.beam b/graehl/NOTES.beam
deleted file mode 100755
index a48d1ab7..00000000
--- a/graehl/NOTES.beam
+++ /dev/null
@@ -1,29 +0,0 @@
-(graehl, comments on code)
-
-passive chart: completion of actual translation rules (X or S NT in Hiero), have
-rule features. Hyperedge inserted with copy of rule feature vector
-(non-sparse). Inefficient; should be postponed on intermediate parses with
-global pruning; just keep pointer to rules and models must provide an interface
-to build a (sparse) feat. vector on demand later for the stuff we keep.
-
-multithreading: none. list of hyperarcs for refinement would need to be
-segregated into subforest blocks and have own output lists for later merging.
-e.g. bottom up count number of tail-reachable nodes under each hypernode, then
-assign to workers.
-
-ngram caching: trie, no locks, for example. for threading, LRU hashing w/ locks per bucket is probably better, or per-thread caches. probably cache is reset per sentence?
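
Of the options above, the per-thread variant is the simplest to sketch (illustrative names only; a real cache would bound its size):

    // Per-thread ngram score cache: no locks, cleared per sentence.
    // score_ngram_uncached is a dummy stand-in for the real LM lookup.
    #include <string>
    #include <unordered_map>

    static double score_ngram_uncached(const std::string& ngram) {
      return -static_cast<double>(ngram.size());  // stand-in only
    }

    thread_local std::unordered_map<std::string, double> ngram_cache;

    double score_ngram(const std::string& ngram) {
      auto it = ngram_cache.find(ngram);
      if (it != ngram_cache.end()) return it->second;
      double s = score_ngram_uncached(ngram);
      ngram_cache.emplace(ngram, s);
      return s;
    }

    void on_new_sentence() { ngram_cache.clear(); }  // reset per sentence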
-
-randlm worth using? guess not.
-
-actually get all 0-state models in 1st pass parse and prune passive edges per span.
-
-allocate cube pruning budget per prev pass
-
-(has been tried in ISI decoder) models with nonstandard state comparison,
-typically (partially) greedy forest scoring, some part of the state is excluded
-from equality/hashing. Within virtual ff interface, would just add equals, hash
-to vtable (rather than the faster raw state compare). If this is done often,
-then add a nonvirtual flag to interface instead, saying whether to use the
-virt. methods or not. or: simple flag by user of ApplyModels (per feature?)
-saying whether to be 100% greedy or 0% - no halfway e.g. item name uses bigram
-context, but score using 5gram state.
diff --git a/graehl/NOTES.earley b/graehl/NOTES.earley
deleted file mode 100755
index 0953708c..00000000
--- a/graehl/NOTES.earley
+++ /dev/null
@@ -1,111 +0,0 @@
-1. fsts (modify target string produced) are quite feasible. augment fsa ff to not just emit features, but also new target words. composition or intersection is no problem (can always bunch into a single FSA/FST lazily by wrapping)
-
-2. sparse fsas (many transitions have -inf score) aren't efficiently supported presently (they are in earley_composer where the fsa is a phrase table); the fsa ff interface doesn't even provide a way to query the non-0 transitions (you have to emit a -inf feature). if sparse fsas were expected often and we wanted exact search, then a similar index of the tcfg as in earley_composer would make sense. however, with prob. beam search, we prune out bad-scoring stuff anyway
-
-3. binarization of rhs isn't usually considered necessary in earley, but i liked the idea of optimal binarization making the most sharing possible. however, this means what would have just been a scan is now a scan+complete.
-
-4. prefix (forward) + inside cost. this is phrased in a way so that prefix includes inside. but there's no reason not to think of it in exclusive terms (outside,inside) where prefix=outside*inside when using viterbi. on the other hand, we usually use the outside*inside as the beam score. and furthermore, it looks like when summing over all derivations, there would be some difficulty calculating, as the total inside wouldn't be known at first.
-
-(a,i) r => (+=a*r,r) would become (o,i) r => (+=[(o*i*r)/r],r) = (+=o*i,r)
-(_,b'') (a,b) => (+=a*b'',+=b*b'') would become (_,b'') (o,b) => (?????)
-
-====
-
-
-the target CFG (tcfg) is finite - absolutely no cycles. conceptually we're intersecting it with a wfsa (weights are feature vectors), which is a lot like parsing a lattice, in that states are (source state, dest state) pairs and you're covering some string(s) that go from source->dest in the wfsa.
-
-Chris' paper http://www.ling.umd.edu/~redpony/forest-reordering.pdf - apparently (figure 5) already contains the exact concept we're going for, albeit with only inside scores. http://www.speech.sri.com/cgi-bin/run-distill?ftp:papers/stolcke-cl95.ps.gz describes a nice way of computing sums over derivations given a string by keeping a tuple of ("forward","inner") scores while Earley parsing. I'm not sure yet if this is applicable (because we'll already have the exact outside scores from the -LM forest already, and plan on using cost pushing toward the top so we don't have to explicitly consider them).
-
-normally in earley, one word is consumed at a time, left to right. completions happen from shortest to longest, then (repeated) predictions, and finally scans. i'm sure this has the usual obvious extension to parsing lattices (proceed in some topological order).
-
-but because the wfsa (ngram lm) has cycles and forgets the length of the string (at some point), it's slightly more complicated than lattice parsing the tcfg - there's no topological order over the wfsa states and so you can't finish all the items [x,j] for j from left->right. best first with monotonic total scores (admissible heuristics) is an easy way to avoid generating the whole space; otherwise I can't think of a fixed order that would allow for alternative beaming. as we discussed, arbitrary predicates filtering candidate items can be added if exact best-first is too slow
-
-if the wfsa were just a single string t[0...n-1], then any time you have an item [i,j]X->a.b that means that there's some derivation in the tCFG of S =>* t[0...i-1]Xc => t[0....i-1]abc =>* t[0...j-1]bc , for a FSA, the analog is S =>* W(0,i)Xc => W(0,i)abc =>* W(0,i)W(i,j)bc where W(a,b) means any string in the wfsa language with a as the start state and b as the final state.
-
-
-http://www.isi.edu/natural-language/teaching/cs544/cs544-huang-3-Earley.pdf
-
-http://www.isi.edu/~lhuang/dp-final.pdf (variation on stolcke 1995 prefix cost)
-
-http://acl.ldc.upenn.edu/P/P07/P07-1019.pdf - phrase based lazy priority queue "cube growing" descendants (p149)
-
-
-
-
-
-http://www.speech.sri.com/cgi-bin/run-distill?ftp:papers/stolcke-cl95.ps.gz
-
-http://www.icsi.berkeley.edu/~stolcke/papers/cl95/node10.html#SECTION00042000000000000000
-
-a) An (unconstrained) Earley path, or simply path, is a sequence of Earley
-states linked by prediction, scanning, or completion. For the purpose of
-this definition, we allow scanning to operate in “generation mode,” i.e., all
-states with terminals to the right of the dot can be scanned, not just those
-matching the input. (For completed states, the predecessor state is defined
-to be the complete state from the same state set contributing to the
-completion.)
-b) A path is said to be constrained by, or generate a string x if the terminals
-immediately to the left of the dot in all scanned states, in sequence, form
-the string x.
-c) A path is complete if the last state on it matches the first, except that the
-dot has moved to the end of the RHS.
-d) We say that a path starts with nonterminal X if the first state on it is a
-predicted state with X on the LHS.
-e) The length of a path is defined as the number of scanned states on it.
-
-Note that the definition of path length is somewhat counter-intuitive, but is motivated
-by the fact that only scanned states correspond directly to input symbols. Thus,
-the length of a path is always the same as the length of the input string it generates.
-
-A constrained path starting with the initial state contains a sequence of states from
-state set 0 derived by repeated prediction, followed by a single state from set 1 produced
-by scanning the first symbol, followed by a sequence of states produced by completion,
-followed by a sequence of predicted states, followed by a state scanning the second
-symbol, and so on. The significance of Earley paths is that they are in a one-to-one
-correspondence with left-most derivations.
-
-
-=============
-
-a) The forward probability alpha_i(X[k]->x.y) is the sum of the probabilities of all
-constrained paths of length i that end in state X[k]->x.y
-
-b) The inner probability beta_i(X[k]->x.y) is the sum of the probabilities of all
-paths of length i-k that start in state X[k,k]->.xy and end in X[k,i]->x.y, and generate the input symbols x[k,...,i-1]
-
-(forward,inner) [i.e. (outside,inside)?]
- unchanged by scan (rule cost is paid up front when predicting)
-
-if X[k,i] -> x.Yz (a,b) and rule Y -> r (p)
-then Y[i,i] -> .r (a',p) with a' += a*p
-
-if Y[j,i]->y. (a'',b'') and X[k,j]->r.Ys (a,b)
-then X[k,i]->rY.s (a',b') with a' += a*b'', b' += b*b''
-
-(this is summing over all derivations)
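
Restated in conventional notation (alpha = forward, beta = inner; this is just the shorthand above, following Stolcke 1995):

    \begin{aligned}
    \text{predict:}\;& X_{[k,i]} \to x\,.\,Yz\;(\alpha,\beta),\;\; Y \to r\;(p)
      &&\Rightarrow\; \alpha(Y_{[i,i]} \to .\,r) \mathrel{+}= \alpha\, p,\quad \beta(Y_{[i,i]} \to .\,r) = p \\
    \text{complete:}\;& Y_{[j,i]} \to y\,.\;(\alpha'',\beta''),\;\; X_{[k,j]} \to r\,.\,Ys\;(\alpha,\beta)
      &&\Rightarrow\; \alpha(X_{[k,i]} \to rY\,.\,s) \mathrel{+}= \alpha\,\beta'',\quad \beta(X_{[k,i]} \to rY\,.\,s) \mathrel{+}= \beta\,\beta'' \\
    \text{scan:}\;& (\alpha,\beta)\ \text{unchanged (rule cost is paid up front at prediction)}
    \end{aligned}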
-
-
-==========
-
-is forward cost viterbi fine? i.e. can i have items whose names ignore the lhs NT (look up predictions that i finish lazily / graph structured?)
-======
-
-1) A -> x . * (trie)
-
-this is somewhat nice. cost pushed for best first, of course. similar benefit as left-branching binarization without the explicit predict/complete steps?
-
-vs. just
-
-2) * -> x . y
-
-here you have to potentially list out all A -> . x y as items * -> . x y immediately, and shared rhs seqs won't be shared except at the usual single-NT predict/complete. of course, the prediction of items -> . x y can occur lazy best-first.
-
-vs.
-
-3) * -> x . *
-
-with 3, we predict all sorts of useless items - that won't give us our goal A and may not participate in any parse. this is not a good option at all.
-
-======
-
--LM forest may have many in-edges per V. (many rules per NT lhs). so instead of generating all successors for scan/predict, i wanted to have them in sorted (admissible) -LM cost order and postpone once the prefix+rule part is more expensive than something else in the agenda. question: how many such postponed successor things?
diff --git a/graehl/NOTES.lm.phrase b/graehl/NOTES.lm.phrase
deleted file mode 100755
index e87cc6fb..00000000
--- a/graehl/NOTES.lm.phrase
+++ /dev/null
@@ -1,180 +0,0 @@
-possibly the most direct solution is to print every individual probability from LM (to global fstream?). since the difference happens even w/o shortening, disable shortening to remove the possible effect of floor(p+bo) vs floor(p)+bo disagreeing
-
-+LM forest (nodes/edges): 2163/11293
- +LM forest (paths): 7.14685e+14
- +LM forest Viterbi logp: -490.21
- +LM forest Viterbi: karachi ( AstRAf rpwrtRr ) in karachi on monday in different HAdvAt gyY and killed 4 people including a woman found dead body of a person from the sea .
- +LM forest derivation: ({<0,28>[1] ||| (final r->l(( karachi|<s> ) start=[ <s>]->{karachi (} r->l(</s>|. sea) end=[sea .]->{</s>} LanguageModelFsa=-5.74766; }({<0,28>[1] [2] ||| [karachi ( : [a woman]] r->l(( karachi|) [found dead : [sea .]] r->l(dead found|woman a) = [karachi ( : [sea .]] LanguageModelFsa=-5.93027 h=-5.83552); }({<0,20>[1] [2] ||| [karachi ( : []] r->l(( karachi|) [gyY and : [a woman]] r->l(and gyY|) = [karachi ( : [a woman]] LanguageModelFsa=-101.72 h=-5.83552); }({<0,12>[1] [2] ||| [karachi ( : [in karachi]] r->l(( karachi|) [on monday : []] r->l(monday on|karachi in) = [karachi ( : []] LanguageModelFsa=-1.99946 h=-5.83552); }({<0,7>[1] [2] ||| [karachi ( : [rpwrtRr )]] r->l(( karachi|) [in karachi : [in karachi]] r->l(karachi in|) rpwrtRr) = [karachi ( : [in karachi]] LanguageModelFsa=-3.40247 h=-5.83552); }({<0,5>[1] [2] ||| [karachi ( : []] r->l(( karachi|) [rpwrtRr ) : [rpwrtRr )]] r->l() rpwrtRr|) = [karachi ( : [rpwrtRr )]] LanguageModelFsa=-102.623 h=-5.83552); }({<0,3>[1] ||| [karachi ( : []] r->l(( karachi|) = [karachi ( : []] LanguageModelFsa=0 h=-5.83552); }({<0,3>karachi [1] ||| [( AstRAf : []] r->l(( karachi|) r->l(AstRAf|( karachi) = [karachi ( : []] LanguageModelFsa=-100 h=-5.83552); r->l(karachi|)}({<1,3>( [1] ||| [AstRAf] r->l(AstRAf (|) = [( AstRAf : []] LanguageModelFsa=0 h=-102.641); r->l((|)}({<2,3>AstRAf ||| r->l(AstRAf|) = [AstRAf] LanguageModelFsa=0 h=-100); r->l(AstRAf|)}) ) ) ) ({<3,5>[1] ) ||| [rpwrtRr] r->l() rpwrtRr|) = [rpwrtRr ) : [rpwrtRr )]] LanguageModelFsa=0 h=-102.623); r->l()|)}({<3,4>rpwrtRr ||| r->l(rpwrtRr|) = [rpwrtRr] LanguageModelFsa=0 h=-100); r->l(rpwrtRr|)}) ) ) ({<5,7>in karachi ||| r->l(karachi in|) = [in karachi : [in karachi]] LanguageModelFsa=0 h=-3.80404); r->l(karachi in|)}) ) ({<7,12>on monday in [1] ||| r->l(monday on|) rule-phrase[in] r->l(in|monday on) [different HAdvAt : []] r->l(HAdvAt different|in monday) = [on monday : []] LanguageModelFsa=-103.918 h=-3.91305); r->l(in monday on|)}({<9,11>different [1] ||| [HAdvAt] r->l(HAdvAt different|) = [different HAdvAt : []] LanguageModelFsa=0 h=-103.573); r->l(different|)}({<10,11>HAdvAt ||| r->l(HAdvAt|) = [HAdvAt] LanguageModelFsa=0 h=-100); r->l(HAdvAt|)}) ) ) ) ({<12,20>[2] killed [1] ||| [gyY and : [gyY and]] r->l(and gyY|) rule-phrase[killed] r->l(killed|and gyY) [4 people : [a woman]] r->l(people 4|killed and) = [gyY and : [a woman]] LanguageModelFsa=-5.57026 h=-101.72); r->l(killed|)}({<12,16>[2] people including a [1] ||| [4] r->l(people 4|) rule-phrase[including a] r->l(a including|people 4) [woman] r->l(woman|a including) = [4 people : [a woman]] LanguageModelFsa=-3.99305 h=-6.22734); r->l(a including people|)}({<12,13>woman ||| r->l(woman|) = [woman] LanguageModelFsa=0 h=-3.82934); r->l(woman|)}) ({<14,15>4 ||| r->l(4|) = [4] LanguageModelFsa=0 h=-3.62974); r->l(4|)}) ) ({<18,20>[1] and ||| [gyY] r->l(and gyY|) = [gyY and : [gyY and]] LanguageModelFsa=0 h=-101.72); r->l(and|)}({<18,19>gyY ||| r->l(gyY|) = [gyY] LanguageModelFsa=0 h=-100); r->l(gyY|)}) ) ) ) ({<20,28>[1] the sea . ||| [found dead : [ from]] r->l(dead found|) rule-phrase[the sea .] r->l(. sea the|from ) = [found dead : [sea .]] LanguageModelFsa=-4.84745 h=-7.62839); r->l(. sea the|)}({<21,27>found [1] from ||| [dead body : []] r->l(dead found|) r->l(body|dead found) rule-phrase[from] r->l(from|) = [found dead : [ from]] LanguageModelFsa=-3.42491 h=-7.62839); r->l(found|) r->l(from|)}({<22,26>dead body of [1] ||| r->l(body dead|) rule-phrase[of] r->l(of|body dead) [a person : []] r->l(person a|of body) = [dead body : []] LanguageModelFsa=-2.9934 h=-4.63222); r->l(of body dead|)}({<22,24>a [1] ||| [person] r->l(person a|) = [a person : []] LanguageModelFsa=0 h=-4.90016); r->l(a|)}({<23,24>person ||| r->l(person|) = [person] LanguageModelFsa=0 h=-3.50165); r->l(person|)}) ) ) ) ) ) )
- +LM forest features: Arity_0=-3.47436;Arity_1=-4.77724;Arity_2=-3.04006;Glue=5;LanguageModel=-446.49;LmFsa=-446.17;PassThrough=5;PhraseModel_0=12.2199;PhraseModel_1=11.6391;PhraseModel_2=10.9878;WordPenalty=-13.0288;Unigram=-462.696;UnigramFsa=-462.696
-Output kbest to -
-0 ||| karachi ( AstRAf rpwrtRr ) in karachi on monday in different HAdvAt gyY and killed 4 people including a woman found dead body of a person from the sea . ||| Arity_0=-3.47436;Arity_1=-4.77724;Arity_2=-3.04006;Glue=5;LanguageModel=-446.49;LmFsa=-446.17;PassThrough=5;PhraseModel_0=12.2199;PhraseModel_1=11.6391;PhraseModel_2=10.9878;WordPenalty=-13.0288;Unigram=-462.696;UnigramFsa=-462.696 ||| -490.21
-
-sent_id=0
-({<0,28>[1] ||| (final r->l(( karachi|<s> ) start=[ <s>]->{karachi (} r->l(</s>|. sea) end=[sea .]->{</s>} LanguageModelFsa=-5.74766; }
- ({<0,28>[1] [2] ||| [karachi ( : [a woman]] r->l(( karachi|) [found dead : [sea .]] r->l(dead found|woman a) = [karachi ( : [sea .]] LanguageModelFsa=-5.93027 h=-5.83552); }
- ({<0,20>[1] [2] ||| [karachi ( : []] r->l(( karachi|) [gyY and : [a woman]] r->l(and gyY|) = [karachi ( : [a woman]] LanguageModelFsa=-101.72 h=-5.83552); }
- ({<0,12>[1] [2] ||| [karachi ( : [in karachi]] r->l(( karachi|) [on monday : []] r->l(monday on|karachi in) = [karachi ( : []] LanguageModelFsa=-1.99946 h=-5.83552); }
- ({<0,7>[1] [2] ||| [karachi ( : [rpwrtRr )]] r->l(( karachi|) [in karachi : [in karachi]] r->l(karachi in|) rpwrtRr) = [karachi ( : [in karachi]] LanguageModelFsa=-3.40247 h=-5.83552); }
- ({<0,5>[1] [2] ||| [karachi ( : []] r->l(( karachi|) [rpwrtRr ) : [rpwrtRr )]] r->l() rpwrtRr|) = [karachi ( : [rpwrtRr )]] LanguageModelFsa=-102.623 h=-5.83552); }
- ({<0,3>[1] ||| [karachi ( : []] r->l(( karachi|) = [karachi ( : []] LanguageModelFsa=0 h=-5.83552); }
- ({<0,3>karachi [1] ||| [( AstRAf : []] r->l(( karachi|) r->l(AstRAf|( karachi) = [karachi ( : []] LanguageModelFsa=-100 h=-5.83552); r->l(karachi|)}
- ({<1,3>( [1] ||| [AstRAf] r->l(AstRAf (|) = [( AstRAf : []] LanguageModelFsa=0 h=-102.641); r->l((|)}
- ({<2,3>AstRAf ||| r->l(AstRAf|) = [AstRAf] LanguageModelFsa=0 h=-100); r->l(AstRAf|)}
- )
- )
- )
- )
- ({<3,5>[1] ) ||| [rpwrtRr] r->l() rpwrtRr|) = [rpwrtRr ) : [rpwrtRr )]] LanguageModelFsa=0 h=-102.623); r->l()|)}
- ({<3,4>rpwrtRr ||| r->l(rpwrtRr|) = [rpwrtRr] LanguageModelFsa=0 h=-100); r->l(rpwrtRr|)}
- )
- )
- )
- ({<5,7>in karachi ||| r->l(karachi in|) = [in karachi : [in karachi]] LanguageModelFsa=0 h=-3.80404); r->l(karachi in|)}
- )
- )
- ({<7,12>on monday in [1] ||| r->l(monday on|) rule-phrase[in] r->l(in|monday on) [different HAdvAt : []] r->l(HAdvAt different|in monday) = [on monday : []] LanguageModelFsa=-103.918 h=-3.91305); r->l(in monday on|)}
- ({<9,11>different [1] ||| [HAdvAt] r->l(HAdvAt different|) = [different HAdvAt : []] LanguageModelFsa=0 h=-103.573); r->l(different|)}
- ({<10,11>HAdvAt ||| r->l(HAdvAt|) = [HAdvAt] LanguageModelFsa=0 h=-100); r->l(HAdvAt|)}
- )
- )
- )
- )
- ({<12,20>[2] killed [1] ||| [gyY and : [gyY and]] r->l(and gyY|) rule-phrase[killed] r->l(killed|and gyY) [4 people : [a woman]] r->l(people 4|killed and) = [gyY and : [a woman]] LanguageModelFsa=-5.57026 h=-101.72); r->l(killed|)}
- ({<12,16>[2] people including a [1] ||| [4] r->l(people 4|) rule-phrase[including a] r->l(a including|people 4) [woman] r->l(woman|a including) = [4 people : [a woman]] LanguageModelFsa=-3.99305 h=-6.22734); r->l(a including people|)}
- ({<12,13>woman ||| r->l(woman|) = [woman] LanguageModelFsa=0 h=-3.82934); r->l(woman|)}
- )
- ({<14,15>4 ||| r->l(4|) = [4] LanguageModelFsa=0 h=-3.62974); r->l(4|)}
- )
- )
- ({<18,20>[1] and ||| [gyY] r->l(and gyY|) = [gyY and : [gyY and]] LanguageModelFsa=0 h=-101.72); r->l(and|)}
- ({<18,19>gyY ||| r->l(gyY|) = [gyY] LanguageModelFsa=0 h=-100); r->l(gyY|)}
- )
- )
- )
- )
- ({<20,28>[1] the sea . ||| [found dead : [ from]] r->l(dead found|) rule-phrase[the sea .] r->l(. sea the|from ) = [found dead : [sea .]] LanguageModelFsa=-4.84745 h=-7.62839); r->l(. sea the|)}
- ({<21,27>found [1] from ||| [dead body : []] r->l(dead found|) r->l(body|dead found) rule-phrase[from] r->l(from|) = [found dead : [ from]] LanguageModelFsa=-3.42491 h=-7.62839); r->l(found|) r->l(from|)}
- ({<22,26>dead body of [1] ||| r->l(body dead|) rule-phrase[of] r->l(of|body dead) [a person : []] r->l(person a|of body) = [dead body : []] LanguageModelFsa=-2.9934 h=-4.63222); r->l(of body dead|)}
- ({<22,24>a [1] ||| [person] r->l(person a|) = [a person : []] LanguageModelFsa=0 h=-4.90016); r->l(a|)}
- ({<23,24>person ||| r->l(person|) = [person] LanguageModelFsa=0 h=-3.50165); r->l(person|)}
- )
- )
- )
- )
- )
- )
-)
-0 ||| karachi ( AstRAf rpwrtRr ) in karachi on monday in different HAdvAt gyY and killed 4 people including a woman found the dead body of a person from the sea . ||| Arity_0=-3.47436;Arity_1=-4.77724;Arity_2=-3.04006;Glue=5;LanguageModel=-446.828;LmFsa=-446.508;PassThrough=5;PhraseModel_0=12.697;PhraseModel_1=11.6391;PhraseModel_2=11.5728;WordPenalty=-13.4631;Unigram=-463.765;UnigramFsa=-463.765 ||| -490.295
-
-sent_id=0
-({<0,28>[1] ||| (final r->l(( karachi|<s> ) start=[ <s>]->{karachi (} r->l(</s>|. sea) end=[sea .]->{</s>} LanguageModelFsa=-5.74766; }
- ({<0,28>[1] [2] ||| [karachi ( : [a woman]] r->l(( karachi|) [found the : [sea .]] r->l(the found|woman a) = [karachi ( : [sea .]] LanguageModelFsa=-3.6217 h=-5.83552); }
- ({<0,20>[1] [2] ||| [karachi ( : []] r->l(( karachi|) [gyY and : [a woman]] r->l(and gyY|) = [karachi ( : [a woman]] LanguageModelFsa=-101.72 h=-5.83552); }
- ({<0,12>[1] [2] ||| [karachi ( : [in karachi]] r->l(( karachi|) [on monday : []] r->l(monday on|karachi in) = [karachi ( : []] LanguageModelFsa=-1.99946 h=-5.83552); }
- ({<0,7>[1] [2] ||| [karachi ( : [rpwrtRr )]] r->l(( karachi|) [in karachi : [in karachi]] r->l(karachi in|) rpwrtRr) = [karachi ( : [in karachi]] LanguageModelFsa=-3.40247 h=-5.83552); }
- ({<0,5>[1] [2] ||| [karachi ( : []] r->l(( karachi|) [rpwrtRr ) : [rpwrtRr )]] r->l() rpwrtRr|) = [karachi ( : [rpwrtRr )]] LanguageModelFsa=-102.623 h=-5.83552); }
- ({<0,3>[1] ||| [karachi ( : []] r->l(( karachi|) = [karachi ( : []] LanguageModelFsa=0 h=-5.83552); }
- ({<0,3>karachi [1] ||| [( AstRAf : []] r->l(( karachi|) r->l(AstRAf|( karachi) = [karachi ( : []] LanguageModelFsa=-100 h=-5.83552); r->l(karachi|)}
- ({<1,3>( [1] ||| [AstRAf] r->l(AstRAf (|) = [( AstRAf : []] LanguageModelFsa=0 h=-102.641); r->l((|)}
- ({<2,3>AstRAf ||| r->l(AstRAf|) = [AstRAf] LanguageModelFsa=0 h=-100); r->l(AstRAf|)}
- )
- )
- )
- )
- ({<3,5>[1] ) ||| [rpwrtRr] r->l() rpwrtRr|) = [rpwrtRr ) : [rpwrtRr )]] LanguageModelFsa=0 h=-102.623); r->l()|)}
- ({<3,4>rpwrtRr ||| r->l(rpwrtRr|) = [rpwrtRr] LanguageModelFsa=0 h=-100); r->l(rpwrtRr|)}
- )
- )
- )
- ({<5,7>in karachi ||| r->l(karachi in|) = [in karachi : [in karachi]] LanguageModelFsa=0 h=-3.80404); r->l(karachi in|)}
- )
- )
- ({<7,12>on monday in [1] ||| r->l(monday on|) rule-phrase[in] r->l(in|monday on) [different HAdvAt : []] r->l(HAdvAt different|in monday) = [on monday : []] LanguageModelFsa=-103.918 h=-3.91305); r->l(in monday on|)}
- ({<9,11>different [1] ||| [HAdvAt] r->l(HAdvAt different|) = [different HAdvAt : []] LanguageModelFsa=0 h=-103.573); r->l(different|)}
- ({<10,11>HAdvAt ||| r->l(HAdvAt|) = [HAdvAt] LanguageModelFsa=0 h=-100); r->l(HAdvAt|)}
- )
- )
- )
- )
- ({<12,20>[2] killed [1] ||| [gyY and : [gyY and]] r->l(and gyY|) rule-phrase[killed] r->l(killed|and gyY) [4 people : [a woman]] r->l(people 4|killed and) = [gyY and : [a woman]] LanguageModelFsa=-5.57026 h=-101.72); r->l(killed|)}
- ({<12,16>[2] people including a [1] ||| [4] r->l(people 4|) rule-phrase[including a] r->l(a including|people 4) [woman] r->l(woman|a including) = [4 people : [a woman]] LanguageModelFsa=-3.99305 h=-6.22734); r->l(a including people|)}
- ({<12,13>woman ||| r->l(woman|) = [woman] LanguageModelFsa=0 h=-3.82934); r->l(woman|)}
- )
- ({<14,15>4 ||| r->l(4|) = [4] LanguageModelFsa=0 h=-3.62974); r->l(4|)}
- )
- )
- ({<18,20>[1] and ||| [gyY] r->l(and gyY|) = [gyY and : [gyY and]] LanguageModelFsa=0 h=-101.72); r->l(and|)}
- ({<18,19>gyY ||| r->l(gyY|) = [gyY] LanguageModelFsa=0 h=-100); r->l(gyY|)}
- )
- )
- )
- )
- ({<20,28>[1] the sea . ||| [found the : [ from]] r->l(the found|) rule-phrase[the sea .] r->l(. sea the|from ) = [found the : [sea .]] LanguageModelFsa=-4.84745 h=-5.31983); r->l(. sea the|)}
- ({<21,27>found [1] from ||| [the dead : []] r->l(the found|) r->l(dead|the found) rule-phrase[from] r->l(from|) = [found the : [ from]] LanguageModelFsa=-5.34421 h=-5.31983); r->l(found|) r->l(from|)}
- ({<22,26>the dead body of [1] ||| r->l(dead the|) rule-phrase[body of] r->l(of body|dead the) [a person : []] r->l(person a|of body) = [the dead : []] LanguageModelFsa=-3.7205 h=-4.97373); r->l(of body dead the|)}
- ({<22,24>a [1] ||| [person] r->l(person a|) = [a person : []] LanguageModelFsa=0 h=-4.90016); r->l(a|)}
- ({<23,24>person ||| r->l(person|) = [person] LanguageModelFsa=0 h=-3.50165); r->l(person|)}
- )
- )
- )
- )
- )
- )
-)
-0 ||| karachi ( AstRAf rpwrtRr ) in karachi on monday in different HAdvAt gyY killed 4 people including a woman while dead body of a person from the sea . ||| Arity_0=-3.47436;Arity_1=-4.77724;Arity_2=-3.04006;Glue=5;LanguageModel=-445.419;LmFsa=-445.099;PassThrough=5;PhraseModel_0=12.5687;PhraseModel_1=12.5781;PhraseModel_2=9.61571;WordPenalty=-12.5945;Unigram=-461.303;UnigramFsa=-461.303 ||| -490.646
-
-sent_id=0
-({<0,28>[1] ||| (final r->l(( karachi|<s> ) start=[ <s>]->{karachi (} r->l(</s>|. sea) end=[sea .]->{</s>} LanguageModelFsa=-5.74766; }
- ({<0,28>[1] [2] ||| [karachi ( : [a woman]] r->l(( karachi|) [while dead : [sea .]] r->l(dead while|woman a) = [karachi ( : [sea .]] LanguageModelFsa=-5.71074 h=-5.83552); }
- ({<0,19>[1] [2] ||| [karachi ( : []] r->l(( karachi|) [gyY killed : [a woman]] r->l(killed gyY|) = [karachi ( : [a woman]] LanguageModelFsa=-103.345 h=-5.83552); }
- ({<0,12>[1] [2] ||| [karachi ( : [in karachi]] r->l(( karachi|) [on monday : []] r->l(monday on|karachi in) = [karachi ( : []] LanguageModelFsa=-1.99946 h=-5.83552); }
- ({<0,7>[1] [2] ||| [karachi ( : [rpwrtRr )]] r->l(( karachi|) [in karachi : [in karachi]] r->l(karachi in|) rpwrtRr) = [karachi ( : [in karachi]] LanguageModelFsa=-3.40247 h=-5.83552); }
- ({<0,5>[1] [2] ||| [karachi ( : []] r->l(( karachi|) [rpwrtRr ) : [rpwrtRr )]] r->l() rpwrtRr|) = [karachi ( : [rpwrtRr )]] LanguageModelFsa=-102.623 h=-5.83552); }
- ({<0,3>[1] ||| [karachi ( : []] r->l(( karachi|) = [karachi ( : []] LanguageModelFsa=0 h=-5.83552); }
- ({<0,3>karachi [1] ||| [( AstRAf : []] r->l(( karachi|) r->l(AstRAf|( karachi) = [karachi ( : []] LanguageModelFsa=-100 h=-5.83552); r->l(karachi|)}
- ({<1,3>( [1] ||| [AstRAf] r->l(AstRAf (|) = [( AstRAf : []] LanguageModelFsa=0 h=-102.641); r->l((|)}
- ({<2,3>AstRAf ||| r->l(AstRAf|) = [AstRAf] LanguageModelFsa=0 h=-100); r->l(AstRAf|)}
- )
- )
- )
- )
- ({<3,5>[1] ) ||| [rpwrtRr] r->l() rpwrtRr|) = [rpwrtRr ) : [rpwrtRr )]] LanguageModelFsa=0 h=-102.623); r->l()|)}
- ({<3,4>rpwrtRr ||| r->l(rpwrtRr|) = [rpwrtRr] LanguageModelFsa=0 h=-100); r->l(rpwrtRr|)}
- )
- )
- )
- ({<5,7>in karachi ||| r->l(karachi in|) = [in karachi : [in karachi]] LanguageModelFsa=0 h=-3.80404); r->l(karachi in|)}
- )
- )
- ({<7,12>on monday in [1] ||| r->l(monday on|) rule-phrase[in] r->l(in|monday on) [different HAdvAt : []] r->l(HAdvAt different|in monday) = [on monday : []] LanguageModelFsa=-103.918 h=-3.91305); r->l(in monday on|)}
- ({<9,11>different [1] ||| [HAdvAt] r->l(HAdvAt different|) = [different HAdvAt : []] LanguageModelFsa=0 h=-103.573); r->l(different|)}
- ({<10,11>HAdvAt ||| r->l(HAdvAt|) = [HAdvAt] LanguageModelFsa=0 h=-100); r->l(HAdvAt|)}
- )
- )
- )
- )
- ({<12,19>[2] killed [1] ||| [gyY] r->l(killed gyY|) [4 people : [a woman]] r->l(people 4|killed gyY) = [gyY killed : [a woman]] LanguageModelFsa=-2.98475 h=-103.345); r->l(killed|)}
- ({<12,16>[2] people including a [1] ||| [4] r->l(people 4|) rule-phrase[including a] r->l(a including|people 4) [woman] r->l(woman|a including) = [4 people : [a woman]] LanguageModelFsa=-3.99305 h=-6.22734); r->l(a including people|)}
- ({<12,13>woman ||| r->l(woman|) = [woman] LanguageModelFsa=0 h=-3.82934); r->l(woman|)}
- )
- ({<14,15>4 ||| r->l(4|) = [4] LanguageModelFsa=0 h=-3.62974); r->l(4|)}
- )
- )
- ({<18,19>gyY ||| r->l(gyY|) = [gyY] LanguageModelFsa=0 h=-100); r->l(gyY|)}
- )
- )
- )
- ({<19,28>while [1] ||| [dead body : [sea .]] r->l(dead while|) r->l(body|dead while) = [while dead : [sea .]] LanguageModelFsa=-1.20144 h=-6.25144); r->l(while|)}
- ({<20,28>[1] . ||| [dead body : [the sea]] r->l(body dead|) rule-phrase[.] r->l(.|sea the) = [dead body : [sea .]] LanguageModelFsa=-0.45297 h=-4.63222); r->l(.|)}
- ({<20,26>[1] the sea ||| [dead body : [ from]] r->l(body dead|) rule-phrase[the sea] r->l(sea the|from ) = [dead body : [the sea]] LanguageModelFsa=-4.39448 h=-4.63222); r->l(sea the|)}
- ({<21,26>dead body of [1] ||| r->l(body dead|) rule-phrase[of] r->l(of|body dead) [a person : [ from]] r->l(person a|of body) = [dead body : [ from]] LanguageModelFsa=-2.9934 h=-4.63222); r->l(of body dead|)}
- ({<21,24>a [1] from ||| [person] r->l(person a|) rule-phrase[from] r->l(from|) = [a person : [ from]] LanguageModelFsa=-2.33299 h=-4.90016); r->l(a|) r->l(from|)}
- ({<23,24>person ||| r->l(person|) = [person] LanguageModelFsa=0 h=-3.50165); r->l(person|)}
- )
- )
- )
- )
- )
- )
- )
-)
diff --git a/graehl/NOTES.partial.binarize b/graehl/NOTES.partial.binarize
deleted file mode 100755
index a9985891..00000000
--- a/graehl/NOTES.partial.binarize
+++ /dev/null
@@ -1,21 +0,0 @@
-Earley doesn't require binarized rules.
-
-But a (partially) binarized grammar may lead to smaller (exhaustive or heuristic) charts. The tradeoff is mostly more reduce steps (the # of NTs should be similar or less than the usual dotted-item binarization).
-
-Optionally collapse a rule rhs to unary as well (normal binarization would stop when an rhs is binary), if the rule to collapse it exists or is frequent enough.
-
-Greedy binarization schemes:
-
-1) (repeatedly) for the most frequent rhs bigram "X a" create a binary rule "V -> X a" and replace "X a" in all rules' rhs with V. stop if the most frequent bigram has count lower than some threshold (e.g. 3), because each instance of it saves one symbol, but the new rule has 3 symbols.
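
A toy C++ version of scheme 1 (plain integer symbols, no rule weights; a sketch of the idea only, not cdec's binarizer):

    // Greedy scheme 1: repeatedly replace the most frequent rhs bigram
    // "X a" with a fresh virtual NT V, adding the rule V -> X a. Stops
    // when the best count drops below the threshold. Rules with rhs of
    // size <= 2 are left alone (collapsing to unary is optional above).
    #include <cstddef>
    #include <map>
    #include <utility>
    #include <vector>

    using Sym = int;                 // toy symbol ids (terminals and NTs alike)
    using RHS = std::vector<Sym>;

    void binarize_frequent_bigrams(std::vector<RHS>& rules, int threshold, Sym next_virtual) {
      for (;;) {
        std::map<std::pair<Sym, Sym>, int> count;
        for (const RHS& r : rules) {
          if (r.size() <= 2) continue;
          for (std::size_t i = 0; i + 1 < r.size(); ++i) ++count[{r[i], r[i + 1]}];
        }
        std::pair<Sym, Sym> best{};
        int best_n = 0;
        for (const auto& kv : count)
          if (kv.second > best_n) { best = kv.first; best_n = kv.second; }
        if (best_n < threshold) break;  // each replacement saves 1 symbol; a new rule costs 3
        const Sym v = next_virtual++;
        rules.push_back({best.first, best.second});  // V -> X a
        for (RHS& r : rules) {
          if (r.size() <= 2) continue;
          RHS out;
          for (std::size_t i = 0; i < r.size(); ++i) {
            if (i + 1 < r.size() && r[i] == best.first && r[i + 1] == best.second) {
              out.push_back(v);  // substitute the virtual NT, skip the bigram
              ++i;
            } else {
              out.push_back(r[i]);
            }
          }
          r = std::move(out);
        }
      }
    }

e.g. with rhs's {A B C} and {A B D} and threshold 2, "A B" becomes V, yielding {V C}, {V D}, and V -> A B.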
-
-2) (repeatedly) for each rule, pick the most frequent bigram in its rhs and binarize it (2a for that rule only, 2b everywhere that bigram occurs). again, some frequency threshold. optionally allow collapsing an rhs to unary. this fails to use some substitutions that are available "for free" based on actions taken at earlier rules w/ no frequent bigrams in common with this one.
-
-3) (DeNero) (for complete binarization only?) for each rule until binarized, pick a split point k of L->r[0..n) to make rules L->V1 V2, V1->r[0..k) V2->r[k..n), to minimize the number of new rules created. If no prefix or suffix of r already exists as a virtual rule, then choose k=floor(n/2). To amend this to consider frequency of rhs, use the frequency of rhs-prefix/suffixes to decide where to split?
-
-4?) Song, Chin-Yew Lin - seems to require collecting stats from a large parsed corpus - interesting idea: make rules that don't match fail early (that's 1 way you get a speedup), and pick V1 -> ... based on some kind of expected utility.
-
-5) l2r, r2l. yawn.
-
-1) seems the most sensible. don't just keep a count for each bigram, keep a set of left and right adjacent partially overlapping bigrams (i.e. the words left and right). for "a b" if "c" and "d" occur to the right, then "b c" and "b d" would be the right adjacent bigrams. when replacing a bigram, follow the left and right adjacencies to decrement the count of those bigrams, and add a (bidirectional) link to the new bigram.
-
-Further, partial-1) can be followed by complete-3) or 5) - although i see no reason not to just continue 1) until the grammar is binary if you want a full binarization.
diff --git a/graehl/NOTES.wfsa b/graehl/NOTES.wfsa
deleted file mode 100755
index b74dc810..00000000
--- a/graehl/NOTES.wfsa
+++ /dev/null
@@ -1,16 +0,0 @@
-left-to-right finite-state models (with heuristic) that depend only on the target string.
-
-http://github.com/jganitkevitch/cdec.git has some progress toward this:
-
-earley_generator.*: make a trie of earley dotted items (from first pass finite parse projected to target side?) and rules for each earley deduction step (is the predict step actually making a hyperedge? or is it marked "active" and so doesn't appear in the result?)
-
-ff_ltor.*: interface for l2r models; needless scoring of "complete" action (only heuristic changes there and heuristics can just be precomputed for all dot-items)
-ff_lm.*: ugly clone of regular LM model with l2r interface
-
-apply_models.*: ApplyLeftToRightModelSet
-
-l2r features:
-
-multiple feature ids from single model?
-
-declare markov bound for bottom-up scoring (inside items) wrapper, and "backoff start" state (i.e. empty context, not <s> context)
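
A guess at the shape such an l2r interface might take (purely illustrative; ff_ltor.* in the fork above is the authoritative version):

    // Illustrative left-to-right feature interface: scan one target word
    // at a time, with a precomputable admissible heuristic per state and
    // a "backoff start" state (empty context rather than <s>).
    #include <vector>

    using WordID = unsigned;
    using L2RState = std::vector<WordID>;  // e.g. bounded left context (markov bound)

    struct LeftToRightFF {
      virtual ~LeftToRightFF() {}
      // cost of emitting w after state in; writes the successor state to out
      virtual double ScanWord(const L2RState& in, WordID w, L2RState* out) const = 0;
      // admissible future-cost heuristic, precomputable for all dot-items
      virtual double Heuristic(const L2RState& in) const = 0;
      // start of sentence vs. "backoff start" (empty context, not <s>)
      virtual L2RState Start(bool backoff) const = 0;
      // markov bound declared for bottom-up (inside item) scoring wrappers
      virtual unsigned MarkovOrder() const = 0;
    };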
diff --git a/rescore/cdec_kbest_to_zmert.pl b/rescore/cdec_kbest_to_zmert.pl
deleted file mode 100755
index 88bc9682..00000000
--- a/rescore/cdec_kbest_to_zmert.pl
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/perl -w
-
-use strict;
-use utf8;
-use Getopt::Long;
-
-my $feature_file;
-my $hyp_file;
-my $help;
-
-Getopt::Long::Configure("no_auto_abbrev");
-if (GetOptions(
- "feature_file|f=s" => \$feature_file,
- "hypothesis_file|h=s" => \$hyp_file,
- "help" => \$help,
-) == 0 || @ARGV!=0 || $help || !$feature_file || !$hyp_file) {
- usage();
- exit(1);
-}
-
-open W, "<$feature_file" or die "Can't read $feature_file: $!";
-my %weights;
-my @all_feats;
-while(<W>) {
- chomp;
- next if /^#/;
- next if /^\s*$/;
- my ($fname, $w) = split /\s+/;
- push @all_feats, $fname;
- $weights{$fname} = 1;
-}
-close W;
-
-open HYP, "<$hyp_file" or die "Can't read $hyp_file: $!";
-while(<HYP>) {
- chomp;
- my ($id, $hyp, $feats) = split / \|\|\| /;
- my @afeats = split /\s+/, $feats;
- my $tot = 0;
- my %fvaldict;
- for my $featpair (@afeats) {
- my ($fname,$fval) = split /=/, $featpair;
- $fvaldict{$fname} = $fval;
- my $weight = $weights{$fname};
- warn "Feature '$fname' not mentioned in feature file $feature_file" unless defined $weight;
-    $weights{$fname} = 1; # mark as seen so we warn about each unknown feature only once
- }
- my @trans;
- for my $feat (@all_feats) {
- my $v = $fvaldict{$feat};
- if (!defined $v) { $v = '0.0'; }
- push @trans, $v;
- }
- print "$id ||| $hyp ||| @trans\n";
-}
-close HYP;
-
-sub usage {
- print <<EOT;
-Usage: $0 -f feature-file.txt/weights.txt -h hyp.nbest.txt
- Puts a cdec k-best list into Joshua/ZMERT format
-EOT
-}
-
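-# illustration (mine): with a feature file listing F1, F2, F3, F4 in that order, the k-best line
-#   0 ||| A C ||| F4=1
-# comes out as
-#   0 ||| A C ||| 0.0 0.0 0.0 1
-# one column per feature, in feature-file order, with 0.0 for features absent from the hypothesis.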
diff --git a/rescore/example/README b/rescore/example/README
deleted file mode 100644
index 92b657ca..00000000
--- a/rescore/example/README
+++ /dev/null
@@ -1,4 +0,0 @@
-Rescoring example:
-
- ../rescore_with_cdec_model.pl -c cdec.ini -s source.txt -h hyp.txt -w weights -f RescoringModel
-
diff --git a/rescore/example/cdec.ini b/rescore/example/cdec.ini
deleted file mode 100644
index 29a1ece3..00000000
--- a/rescore/example/cdec.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-formalism=scfg
-grammar=small.scfg
diff --git a/rescore/example/hyp.txt b/rescore/example/hyp.txt
deleted file mode 100644
index c4757f6c..00000000
--- a/rescore/example/hyp.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-0 ||| A B C ||| F1=1 F2=1
-0 ||| A b c ||| F1=1 F3=1
-0 ||| A C ||| F4=1
-1 ||| X Y ||| F5=1
-1 ||| XY ||| F6=1
diff --git a/rescore/example/small.scfg b/rescore/example/small.scfg
deleted file mode 100644
index 402a585a..00000000
--- a/rescore/example/small.scfg
+++ /dev/null
@@ -1,9 +0,0 @@
-[X] ||| a b c ||| A B C ||| fe=0.2
-[X] ||| a b ||| A B ||| fe=0.8
-[X] ||| c ||| C ||| fe=0.3
-[X] ||| c ||| c ||| fe=1.3
-[X] ||| a b c ||| A B c ||| fe=0.8
-[X] ||| a b c ||| A C ||| fe=2
-[X] ||| x ||| X ||| fe=0.2
-[X] ||| y ||| Y ||| fe=0.5
-[X] ||| x y ||| XY ||| fe=0.8
diff --git a/rescore/example/source.txt b/rescore/example/source.txt
deleted file mode 100644
index e8d4eda2..00000000
--- a/rescore/example/source.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-a b c
-x y
diff --git a/rescore/example/weights b/rescore/example/weights
deleted file mode 100644
index a22d36f1..00000000
--- a/rescore/example/weights
+++ /dev/null
@@ -1 +0,0 @@
-fe -0.8
diff --git a/rescore/generate_zmert_params_from_weights.pl b/rescore/generate_zmert_params_from_weights.pl
deleted file mode 100755
index a9287896..00000000
--- a/rescore/generate_zmert_params_from_weights.pl
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/perl -w
-
-use strict;
-
-my %defaults;
-$defaults{'LanguageModel'} = "Opt\t0\t10\t0\t2.5";
-$defaults{'EgivenF'} = "Opt\t-5\t0.5\t-3\t0.5";
-$defaults{'LexEGivenF'} = "Opt\t-5\t0.5\t-3\t0.5";
-$defaults{'LexFGivenE'} = "Opt\t-5\t0.5\t-3\t0.5";
-$defaults{'PassThrough'} = "Opt\t-Inf\t+Inf\t-10\t0";
-$defaults{'WordPenalty'} = "Opt\t-Inf\t2\t-5\t0";
-my $DEFAULT = "Opt\t-Inf\t+Inf\t-1\t+1";
-
-while(<>) {
- next if /^#/;
- chomp;
- next if /^\s*$/;
- s/^\s+//;
- s/\s+$//;
-  my ($name, $val) = split /\s+/; # avoid lexical $a/$b, which would mask sort's package vars
-  next unless ($name && $val);
-  my $line = $DEFAULT;
-  if ($defaults{$name}) { $line = $defaults{$name}; }
-  print "$name\t|||\t$val\t$line\n";
-}
-
-print "normalization = none\n";
-
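-# illustration (mine): a weights line "LanguageModel 1.0" produces the tab-separated line
-#   LanguageModel ||| 1.0 Opt 0 10 0 2.5
-# while a feature absent from %defaults, say "MyFeature 0.3", gets the catch-all range
-#   MyFeature ||| 0.3 Opt -Inf +Inf -1 +1
-# and the output always ends with "normalization = none".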
diff --git a/rescore/rerank.pl b/rescore/rerank.pl
deleted file mode 100755
index 4a0c5750..00000000
--- a/rescore/rerank.pl
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/perl -w
-
-use strict;
-use utf8;
-use Getopt::Long;
-
-my $weights_file;
-my $hyp_file;
-my $help;
-my $kbest; # flag to extract reranked list
-
-Getopt::Long::Configure("no_auto_abbrev");
-if (GetOptions(
- "weights_file|w=s" => \$weights_file,
- "hypothesis_file|h=s" => \$hyp_file,
- "kbest" => \$kbest,
- "help" => \$help,
-) == 0 || @ARGV!=0 || $help || !$weights_file || !$hyp_file) {
- usage();
- exit(1);
-}
-
-open W, "<$weights_file" or die "Can't read $weights_file: $!";
-my %weights;
-while(<W>) {
- chomp;
- next if /^#/;
- next if /^\s*$/;
- my ($fname, $w) = split /\s+/;
- $weights{$fname} = $w;
-}
-close W;
-
-my $cur = undef;
-my %hyps = ();
-open HYP, "<$hyp_file" or die "Can't read $hyp_file: $!";
-while(<HYP>) {
- chomp;
- my ($id, $hyp, $feats) = split / \|\|\| /;
- unless (defined $cur) { $cur = $id; }
- if ($cur ne $id) {
- extract_1best($cur, \%hyps);
- $cur = $id;
- %hyps = ();
- }
- my @afeats = split /\s+/, $feats;
- my $tot = 0;
- for my $featpair (@afeats) {
- my ($fname,$fval) = split /=/, $featpair;
- my $weight = $weights{$fname};
- die "Unweighted feature '$fname'" unless defined $weight;
- $tot += ($weight * $fval);
- }
- $hyps{"$hyp ||| $feats"} = $tot;
-}
-extract_1best($cur, \%hyps) if defined $cur;
-close HYP;
-
-sub extract_1best {
- my ($id, $rh) = @_;
- my %hyps = %$rh;
- if ($kbest) {
- for my $hyp (sort { $hyps{$b} <=> $hyps{$a} } keys %hyps) {
- print "$id ||| $hyp\n";
- }
- } else {
- my $best_score = undef;
- my $best_hyp = undef;
- for my $hyp (keys %hyps) {
- if (!defined $best_score || $hyps{$hyp} > $best_score) {
- $best_score = $hyps{$hyp};
- $best_hyp = $hyp;
- }
- }
- $best_hyp =~ s/ \|\|\|.*$//;
- print "$best_hyp\n";
- }
-}
-
-sub usage {
- print <<EOT;
-Usage: $0 -w weights.txt -h hyp.nbest.txt [--kbest]
- Reranks n-best lists with new weights, extracting the new 1/k-best entries.
-EOT
-}
-
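-# illustration (mine): with a hypothetical weights file containing "F1 0.5" and "F2 1.0", the line
-#   0 ||| A B C ||| F1=1 F2=1
-# scores 0.5*1 + 1.0*1 = 1.5; without --kbest only the highest-scoring hypothesis string per id
-# is printed, with its feature field stripped.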
diff --git a/rescore/rescore_inv_model1.pl b/rescore/rescore_inv_model1.pl
deleted file mode 100755
index 780452f5..00000000
--- a/rescore/rescore_inv_model1.pl
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/perl -w
-
-use strict;
-use utf8;
-use Getopt::Long;
-
-my $model_file;
-my $src_file;
-my $hyp_file;
-my $help;
-my $reverse_model;
-my $feature_name='M1SrcGivenTrg';
-
-Getopt::Long::Configure("no_auto_abbrev");
-if (GetOptions(
- "model_file|m=s" => \$model_file,
- "source_file|s=s" => \$src_file,
- "feature_name|f=s" => \$feature_name,
- "hypothesis_file|h=s" => \$hyp_file,
- "help" => \$help,
-) == 0 || @ARGV!=0 || $help || !$model_file || !$src_file || !$hyp_file) {
- usage();
- exit;
-}
-
-binmode STDIN, ":utf8";
-binmode STDOUT, ":utf8";
-binmode STDERR, ":utf8";
-
-print STDERR "Reading Model 1 probabilities from $model_file...\n";
-open M, "<$model_file" or die "Couldn't read $model_file: $!";
-binmode M, ":utf8";
-my %m1;
-while(<M>){
- chomp;
- my ($e,$f,$lp) = split /\s+/;
- die unless defined $e;
- die unless defined $f;
- die unless defined $lp;
- $m1{$f}->{$e} = $lp;
-}
-close M;
-
-open SRC, "<$src_file" or die "Can't read $src_file: $!";
-open HYP, "<$hyp_file" or die "Can't read $hyp_file: $!";
-binmode(SRC,":utf8");
-binmode(HYP,":utf8");
-binmode(STDOUT,":utf8");
-my @source; while(<SRC>){chomp; push @source, $_; }
-close SRC;
-my $src_len = scalar @source;
-print STDERR "Read $src_len sentences...\n";
-print STDERR "Rescoring...\n";
-
-my $cur = undef;
-my @hyps = ();
-my @feats = ();
-while(<HYP>) {
- chomp;
- my ($id, $hyp, $feats) = split / \|\|\| /;
- unless (defined $cur) { $cur = $id; }
- die "sentence ids in k-best list file must be between 0 and $src_len" if $id < 0 || $id > $src_len;
- if ($cur ne $id) {
- rescore($cur, $source[$cur], \@hyps, \@feats);
- $cur = $id;
- @hyps = ();
- @feats = ();
- }
- push @hyps, $hyp;
- push @feats, $feats;
-}
-rescore($cur, $source[$cur], \@hyps, \@feats) if defined $cur;
-
-sub rescore {
- my ($id, $src, $rh, $rf) = @_;
- my @hyps = @$rh;
- my @feats = @$rf;
- my $nhyps = scalar @hyps;
- my %cache = ();
- print STDERR "RESCORING SENTENCE id=$id (# hypotheses=$nhyps)...\n";
- for (my $i=0; $i < $nhyps; $i++) {
- my $score = $cache{$hyps[$i]};
- if (!defined $score) {
- if ($reverse_model) {
- die "not implemented";
- } else {
- $score = m1_prob($src, $hyps[$i]);
- }
- $cache{$hyps[$i]} = $score;
- }
- print "$id ||| $hyps[$i] ||| $feats[$i] $feature_name=$score\n";
- }
-
-}
-
-sub m1_prob {
- my ($fsent, $esent) = @_;
- die unless defined $fsent;
- die unless defined $esent;
- my @fwords = split /\s+/, $fsent;
- my @ewords = split /\s+/, $esent;
- push @ewords, "<eps>";
- my $tp = 0;
- for my $f (@fwords) {
- my $m1f = $m1{$f};
- if (!defined $m1f) { $m1f = {}; }
- my $tfp = 0;
- for my $e (@ewords) {
- my $lp = $m1f->{$e};
- if (!defined $lp) { $lp = -100; }
- #print "P($f|$e) = $lp\n";
- my $prob = exp($lp);
- #if ($prob > $tfp) { $tfp = $prob; }
- $tfp += $prob;
- }
- $tp += log($tfp);
- $tp -= log(scalar @ewords); # uniform probability of each generating word
- }
- return $tp;
-}
-
-sub usage {
- print STDERR "Usage: $0 -m model_file.txt -h hypothesis.nbest -s source.txt\n Adds the back-translation probability under Model 1\n Use training/model1 to generate the required parameter file\n";
-}
-
-
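-# for reference, the quantity m1_prob computes: with E = the hypothesis words plus <eps>, and lp
-# the log-prob table read from the model file (defaulting to -100 for unseen pairs), the added
-# feature is
-#   score = sum over source words f of [ log( sum over e in E of exp(lp(f|e)) ) - log |E| ]
-# i.e. each source word is generated independently by a uniform mixture over the target words
-# and <eps>.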
diff --git a/rescore/rescore_with_cdec_model.pl b/rescore/rescore_with_cdec_model.pl
deleted file mode 100755
index cdd8c217..00000000
--- a/rescore/rescore_with_cdec_model.pl
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/perl -w
-
-use strict;
-use utf8;
-my @ORIG_ARGV=@ARGV;
-use Cwd qw(getcwd);
-my $SCRIPT_DIR; BEGIN { use Cwd qw/ abs_path /; use File::Basename; $SCRIPT_DIR = dirname(abs_path($0)); push @INC, $SCRIPT_DIR, "$SCRIPT_DIR/../environment"; }
-use LocalConfig;
-use Getopt::Long;
-use IPC::Open2;
-use POSIX ":sys_wait_h";
-
-my $decoder = "$SCRIPT_DIR/../decoder/cdec";
-my $help;
-my $cdec_ini;
-my $src_file;
-my $hyp_file;
-my $reverse_model;
-my $weights_file;
-my $feature_name='NewModel';
-
-sub catch_pipe {
- my $signame = shift;
- die "$0 received SIGPIPE: did the decoder die?\n";
-}
-$SIG{PIPE} = \&catch_pipe;
-
-Getopt::Long::Configure("no_auto_abbrev");
-if (GetOptions(
- "config|c=s" => \$cdec_ini,
- "weights|w=s" => \$weights_file,
- "source_file|s=s" => \$src_file,
- "feature_name|f=s" => \$feature_name,
- "hypothesis_file|h=s" => \$hyp_file,
- "reverse" => \$reverse_model, # if true translate hyp -> src
- "decoder=s" => \$decoder,
- "help" => \$help,
-) == 0 || @ARGV!=0 || $help || !$cdec_ini || !$src_file || !$hyp_file) {
- usage();
- exit;
-}
-die "Can't find $decoder" unless -f $decoder;
-die "Can't run $decoder" unless -x $decoder;
-my $weights = '';
-if (defined $weights_file) {
- die "Can't read $weights_file" unless -f $weights_file;
- $weights = "-w $weights_file";
-}
-my $decoder_command = "$decoder -c $cdec_ini --quiet $weights --show_conditional_prob";
-print STDERR "DECODER COMMAND: $decoder_command\n";
-my $cdec_pid = open2(\*CDEC_IN, \*CDEC_OUT, $decoder_command)
- or die "Couldn't run $decoder: $!";
-sleep 1; # give the decoder a moment to start up before we write to it
-
-die "Can't find $cdec_ini" unless -f $cdec_ini;
-open SRC, "<$src_file" or die "Can't read $src_file: $!";
-open HYP, "<$hyp_file" or die "Can't read $hyp_file: $!";
-binmode(SRC,":utf8");
-binmode(HYP,":utf8");
-binmode(STDOUT,":utf8");
-my @source; while(<SRC>){chomp; push @source, $_; }
-close SRC;
-my $src_len = scalar @source;
-print STDERR "Read $src_len sentences...\n";
-binmode(CDEC_IN, ":utf8");
-binmode(CDEC_OUT, ":utf8");
-
-my $cur = undef;
-my @hyps = ();
-my @feats = ();
-while(<HYP>) {
- chomp;
- my ($id, $hyp, $feats) = split / \|\|\| /;
- unless (defined $cur) { $cur = $id; }
- die "sentence ids in k-best list file must be between 0 and $src_len" if $id < 0 || $id > $src_len;
- if ($cur ne $id) {
- rescore($cur, $source[$cur], \@hyps, \@feats);
- $cur = $id;
- @hyps = ();
- @feats = ();
- }
- push @hyps, $hyp;
- push @feats, $feats;
-}
-rescore($cur, $source[$cur], \@hyps, \@feats) if defined $cur;
-
-close CDEC_IN;
-close CDEC_OUT;
-close HYP;
-waitpid($cdec_pid, 0);
-my $status = $? >> 8;
-if ($status != 0) {
- print STDERR "Decoder returned bad status!\n";
-}
-
-sub rescore {
- my ($id, $src, $rh, $rf) = @_;
- my @hyps = @$rh;
- my @feats = @$rf;
- my $nhyps = scalar @hyps;
- print STDERR "RESCORING SENTENCE id=$id (# hypotheses=$nhyps)...\n";
- for (my $i=0; $i < $nhyps; $i++) {
- if ($reverse_model) {
- print CDEC_OUT "<seg id=\"$id\">$hyps[$i] ||| $src</seg>\n";
- } else {
- print CDEC_OUT "<seg id=\"$id\">$src ||| $hyps[$i]</seg>\n";
- }
- my $score = <CDEC_IN>;
- chomp $score;
- print "$id ||| $hyps[$i] ||| $feats[$i] $feature_name=$score\n";
- }
-}
-
-sub usage {
- print <<EOT;
-Usage: $0 -c cdec.ini [-w cdec_weights.txt] -s source.txt -h hypothesis.nbest.txt [-f FeatureName]
-EOT
- exit 0
-}
-