- require 'zipf' # provides splitpipe, used in the k-best section below
- require 'json' # JSON.parse/pretty_generate for derivations and feedback
!!!
%html
%head
%title debug view (session ##{session_key})
%link(rel="stylesheet" type="text/css" href="debug.css")
%script{:src =>"http://ajax.googleapis.com/ajax/libs/jquery/1.11.2/jquery.min.js", :charset=>"utf-8"}
%script{:src => "http://postedit.cl.uni-heidelberg.de/js/debug.js"}
%body
%h1 debug view
%h2 session ##{session_key}
- if data["kbest"].empty?
%p.red
%strong No data to show!
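-# maintenance actions: debug.js (loaded above) presumably requests each link's tgt path via ajax and shows the result in #ajax_result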
%ul
%li
%a.ajax{:tgt => "/reset_progress", :href => "#"} reset progress
%li
%a.ajax{:tgt => "/reset_weights", :href => "#"} reset weights
%li
%a.ajax{:tgt => "/reset_extractor", :href => "#"} reset extractor
%li
%a.ajax{:tgt => "/reset_add_rules", :href => "#"} reset add. rules
%li
%a.ajax{:tgt => "/shutdown", :href => "#"} shutdown
%p#ajax_result
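-# learning-rate controls: picking a value opens /set_learning_rate/<rate> (dense) or /set_sparse_learning_rate/<rate> (sparse features) on this host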
%p
%strong learning rate:
- rates = [1000,100,10,1,0.1,0.01,0.001,0.0001,0.00001,0.000001,0.0000001,0.00000001,0.000000001,0.0000000001]
%select
  - rates.each do |i|
    %option{:value => i, :onclick => "window.open(\"http://\"+window.location.host+\"/set_learning_rate/#{i}\");"} #{i}
%span | sparse features:
%select
  - rates.each do |i|
    %option{:value => i, :onclick => "window.open(\"http://\"+window.location.host+\"/set_sparse_learning_rate/#{i}\");"} #{i}
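-# index of the current segment: progress counts confirmed segments, so clamp progress-1 at 0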
- idx = [0, data2["progress"].to_i-1].max
%table
%tr
%td.noborder
%strong progress:
%td.left #{idx}
%tr
%td.noborder
%strong source:
%td.left #{data2["source_segments"][[0,data2["progress"]-1].max]}
%tr
%td.noborder
%strong source (original):
%td.left #{data2["raw_source_segments"][[0,data2["progress"]-1].max]}
%tr
%td.noborder
%strong post-edit:
%td.left #{data2["post_edits_raw"][data2["progress"]-1]}
%tr
%td.noborder
%strong post-edit (processed):
%td.left #{data2["post_edits"][data2["progress"]-1]}
%tr
%td.noborder
%strong original mt:
%td.left #{data2["mt_raw"][data2["progress"]-1]}
%tr
%td.noborder
%strong shown mt:
%td.left #{data2["mt"][data2["progress"]-1]}
%tr
%td.noborder
%strong best match (bleu=#{data["best_match_score"]}):
%td.left #{data["best_match"]}
%h2 derivation
%pre #{data2["derivations"][data2["progress"]-1]}
%p
%strong processed
- if data2["derivations_proc"][data2["progress"]-1]
%pre #{JSON.pretty_generate(JSON.parse(data2["derivations_proc"][data2["progress"]-1]))}
%h2 manual update
- if data2["feedback"][data2["progress"]-1]
%pre #{JSON.pretty_generate(JSON.parse(data2["feedback"][data2["progress"]-1]))}
%h2 meta
%p k: #{data["samples_size"]}
%p number of updates: #{data["num_up"]}
%p updated features: #{data["updated_features"]}
%p learning rate: #{data["learning_rate"]}
%p learning rate (sparse): #{data["learning_rate_sparse"]}
%p duration: #{data2["durations"][idx]}ms
%p updated: #{data2["updated"][idx]}
%h2 additional rules (forced rules)
%pre #{additional_rules.join("\n")}
%h2 k-best
%p
  bleu | model score | original rank | |e| | translation
  %br/
  features
%p.red red: update needed (a hypothesis above, i.e. with higher BLEU, has a lower model score)
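-# parse each entry into [bleu, model score, original rank, features, translation], then sort by BLEU, descending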
%ol
- kbest = []
- data["kbest"].each { |i| x=splitpipe(i); kbest << [ x[0].to_f, x[1].to_f, x[2].to_i, x[3], x[4] ] }
- kbest.sort! { |i,j| j[0] <=> i[0] }
- kbest.each_with_index do |k,j|
- b = kbest[0,j].any? { |l| l[0]>k[0] && l[1]<k[1] }
- if b
  %li
    %strong.red #{"%.2f"%(k[0].to_f*100)} | #{k[1]} | #{k[2]} | #{k[4].split.size} | #{k[4]}
    %p{:style=>"font-size:80%"} #{k[3]}
- else
%li
%strong #{"%.2f"%(k[0].to_f*100)} | #{k[1]} | #{k[2]} | #{k[4].split.size} | #{k[4]}
%p{:style=>"font-size:80%"} #{k[3]}
- if [9,89].include? j
%hr
%h2 weight updates
%table
%tr
%th feature
%th before
%th after
%th diff
%th raw diff
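-# weights_before defaults to 0 so features that only appear after the update still get a "before" value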
- data["weights_before"].default = 0
- data["weights_after"].keys.each.sort { |a,b| a[0] <=> b[0] }.each do |k|
%tr
%td.noborder #{k}
%td #{"%+.3f"%data["weights_before"][k].round(4)}
%td #{"%+.3f"%data["weights_after"][k].round(4)}
- diff = data["weights_after"][k]-data["weights_before"][k]
- if diff < 0
%td.red #{"%+.3f"%(diff).round(4)}
- elsif diff > 0
%td.green #{"%+.3f"%(diff).round(4)}
- else
%td #{"%+.3f"%(diff).round(4)}
- if !k.start_with? "R:"
%td #{"%+.1f"%((data["weights_after"][k]-data["weights_before"][k])/data["learning_rate"]).round(2)}
- else
%td #{"%+.1f"%((data["weights_after"][k]-data["weights_before"][k])/data["learning_rate_sparse"]).round(2)}
%h3 features explained
%table
%tr
%td.noborder EgivenFCoherent (rule)
%td.left -log10[ c(e, f) / sample c(f) ]
%tr
%td.noborder ForceRule (rule)
%td.left only feature of additional rules, weight fixed at 1
%tr
%td.noborder Glue
%td.left absolute number of rules used from glue grammar
%tr
%td.noborder IsSingletonF/E (rule)
%td.left true|false (1|0), summed over the derivation
%tr
%td.noborder IsSingletonFE (rule)
%td.left true|false (1|0), summed over the derivation
%tr
%td.noborder LanguageModel
%td.left -log10[ score ]
%tr
%td.noborder LanguageModel_OOV
%td.left absolute count of OOV unigrams
%tr
%td.noborder MaxLexFgivenE (rule)
%td.left Sum_f -log10(maxScore) (maxScore = max_e(ttable(f)))
%tr
%td.noborder MaxLexEgivenF (rule)
%td.left Sum_e -log10(maxScore) (maxScore = max_f(ttable(e)))
%tr
%td.noborder PassThrough
%td.left absolute count of used PassThrough rules (1 per word)
%tr
%td.noborder SampleCountF (rule)
%td.left log10 [ sample c(f) ]
%tr
%td.noborder WordPenalty
%td.left -log10(e) * |e| = -(1/ln(10)) * |e| = -0.43429448190325176 * |e|
%tr
%td.noborder SourceWordPenalty (per edge/rule)
%td.left same as WordPenalty, but over the source side (|f| instead of |e|)
%tr
%td.noborder R:*
%td.left rule indicator features; each fires once per rule use and is summed over the full derivation
%tr
%td.noborder Shape_*
%td.left indicator features for rule shapes
%tr
%td.noborder IsSupportedOnline
%td.left counts how many rules have support from local context (Denkowski)