- require 'zipf'
!!!
%html
%head
%title Debug view (Session ##{session_key})
%link(rel="stylesheet" type="text/css" href="debug.css")
%script{:src =>"http://ajax.googleapis.com/ajax/libs/jquery/1.11.2/jquery.min.js", :charset=>"utf-8"}
%script{:src => "http://postedit.cl.uni-heidelberg.de/js/debug.js"}
%body
%h1 Debug view
%h2 Session [##{session_key}]
- if pairwise_ranking_data["kbest"].empty?
%p.red
%strong No data to show!
%ul
%li
%a{ :href => "#controls" } Controls
%li
%a{ :href => "#post_edit" } Post-edit & machine translation
%li
%a{ :href => "#meta" } Meta
%li
%a{ :href => "#rules" } Learned rules
%li
%a{ :href => "#pairwise_ranking" } Pairwise ranking
%hr
/=#########################################################################
%h2#controls Controls
%h3 Reset
%p
%strong [Server reply]
%span#control_reply
%ul
%li
%a.ajax{:tgt => "/reset_progress", :href => "#controls"} Reset progress
%li
%a.ajax{:tgt => "/reset_weights", :href => "#controls"} Reset weights
%li
%a.ajax{:tgt => "/reset_extractor", :href => "#controls"} Reset extractor
%li
%a.ajax{:tgt => "/reset_new_rules", :href => "#controls"} Reset new rules
%li
%a.ajax{:tgt => "/shutdown", :href => "#controls"} Initiate shutdown
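-# The .ajax controls above only declare a :tgt path; the request dispatch and writing
-# the reply into #control_reply happen in debug.js. Below, a minimal server-side sketch
-# of two matching endpoints, assuming a Sinatra-style app; the route paths are taken from
-# the :tgt attributes, the handler bodies and state are illustrative assumptions only.
-#
-#   require 'sinatra'
-#
-#   STATE = { progress: 0, learning_rate: 1.0e-05 }  # hypothetical in-memory state
-#
-#   get '/reset_progress' do
-#     STATE[:progress] = 0
-#     'progress reset'  # plain-text reply that debug.js would show in #control_reply
-#   end
-#
-#   get '/set_learning_rate/:rate' do
-#     STATE[:learning_rate] = params['rate'].to_f
-#     "learning rate set to #{STATE[:learning_rate]}"
-#   end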
%h3 Learning rate
%p Default for dense features: 1.0e-05, for sparse features: 1.0e-05
%select
- [1000,100,10,1,0.1,0.01,0.001,0.0001,0.00001,0.000001,0.0000001,0.00000001,0.000000001,0.0000000001].each do |i|
%option.ajax{:value => i, :tgt => "/set_learning_rate/#{i}"} #{i}
%em dense features
%select
- [1000,100,10,1,0.1,0.01,0.001,0.0001,0.00001,0.000001,0.0000001,0.00000001,0.000000001,0.0000000001].each do |i|
%option.ajax{:value => i, :tgt => "/set_learning_rate/sparse/#{i}"} #{i}
%em sparse features
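-# The "Diff." and "Raw diff." columns of the weight-update table further down are
-# related through the rate selected here: applied diff = raw diff * learning rate
-# (features prefixed "R:" use the sparse rate). Worked example with assumed values:
-#   raw diff = +2.0, learning rate = 1.0e-05  =>  applied diff = +0.00002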
%p
%a{ :href => "#" } ^ up
%hr
/=#########################################################################
%h2#post_edit Post-edit & machine translation
%p#original_svg_b64 #{data["original_svg"][progress]}
%p#svg_b64 #{data["svg"][progress]}
%div#original_svg
%div#svg
%table
%tr
%td.noborder
%strong progress:
%td.left #{[0,progress].max}
%tr
%td.noborder
%strong MT Input
%td.left #{data["source_segments"][[0,progress].max]}
%tr
%td.noborder
%strong Raw source
%td.left #{data["raw_source_segments"][[0,progress].max]}
%tr
%td.noborder
%strong Post-edit
%td.left #{data["post_edits_raw"][progress]}
%tr
%td.noborder
%strong Post-edit (processed)
%td.left #{data["post_edits"][progress]}
%tr
%td.noborder
%strong Original MT
%td.left #{data["mt_raw"][progress]}
%tr
%td.noborder
%strong Displayed MT
%td.left #{data["mt"][progress]}
%tr
%td.noborder
%strong Best match (BLEU=#{(pairwise_ranking_data["best_match_score"]*100).round(2)}%)
%td.left #{pairwise_ranking_data["best_match"]}
%h2 Derivation
%pre #{data["derivations"][progress]}
/= %h3 Processed
/= - if data["derivations_proc"][progress]
/= %pre #{JSON.pretty_generate(JSON.parse(data["derivations_proc"][progress]))}
%h2 Client reply
- if data["feedback"][progress]
%pre #{JSON.pretty_generate(JSON.parse(data["feedback"][progress]))}
%p
%a{ :href => "#" } ^ up
%hr
/=#########################################################################
%h2#meta Meta
%p k (sample size): #{pairwise_ranking_data["samples_size"]}
%p number of updates: #{pairwise_ranking_data["num_up"]}
%p updated features: #{pairwise_ranking_data["updated_features"]}
%p learning rate: #{pairwise_ranking_data["learning_rate"]}
%p learning rate (sparse): #{pairwise_ranking_data["learning_rate_sparse"]}
%p duration: #{data["durations"][progress]}ms
%p updated: #{data["updated"][progress]}
%p
%a{ :href => "#" } ^ up
%hr
/=#########################################################################
%h2#rules New & known rules
%h3 New rules
%pre #{additional_rules.join("\n")}
%h3 Known rules
%pre #{rejected_rules.join("\n")}
%p
%a{ :href => "#" } ^ up
%hr
/=#########################################################################
%h2#pairwise_ranking Pairwise ranking updates
%h3 K-best list
%p (Empty, or showing the list from a previous update, if the current post-edit produced no change.)
%pre [BLEU score | Model score | Original rank | \|e\| | Translation \n Features]
%p.red In red: update needed, i.e. at least one hypothesis above it (with a higher BLEU score) has a lower model score.
%ol
- kbest = []
- pairwise_ranking_data["kbest"].each { |i| x=splitpipe(i); kbest << [ x[0].to_f, x[1].to_f, x[2].to_i, x[3], x[4] ] }
- kbest.sort! { |i,j| j[0] <=> i[0] }
- kbest.each_with_index do |k,j|
  - b = kbest[0,j].map { |l| l[0]>k[0] && l[1]<k[1] }.include? true
  - if b
    %li.red #{(k[0]*100).round(2)} | #{k[1].round(4)} | #{k[2]} | #{k[3].split.size} | #{k[3]}
  - else
    %li #{(k[0]*100).round(2)} | #{k[1].round(4)} | #{k[2]} | #{k[3].split.size} | #{k[3]}
  %p{:style=>"font-size:80%"} #{k[4]}
  - if [9,89].include? j
    %hr
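-# The red condition above marks ranking errors: the hypothesis scores higher under the
-# model than some other hypothesis with a higher BLEU score. A minimal Ruby sketch of a
-# pairwise perceptron-style weight update such an error would trigger; the function and
-# variable names are assumptions, only the general scheme is implied by this view.
-#
-#   # Move the weights toward the features of the hypothesis with higher BLEU but
-#   # lower model score ("better") and away from the mis-ranked one ("worse").
-#   def pairwise_update(weights, better_feats, worse_feats, eta)
-#     (better_feats.keys | worse_feats.keys).each do |f|
-#       weights[f] = weights.fetch(f, 0.0) +
-#                    eta * (better_feats.fetch(f, 0.0) - worse_feats.fetch(f, 0.0))
-#     end
-#     weights
-#   end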
%h3 Weight updates
%p Raw update: #{pairwise_ranking_data["update_raw"]}
/=%p Update (learning rates applied):
/=  #{pairwise_ranking_data["update"]}
/=- "#{pairwise_ranking_data["update"]}"
%table
  %tr
    %th Feature
    %th Before
    %th After
    %th Diff.
    %th Raw diff.
  - pairwise_ranking_data["weights_before"].default = 0
  - pairwise_ranking_data["weights_after"].keys.sort.each do |k|
    %tr
      %td.noborder #{k}
      %td #{"%+.3f"%pairwise_ranking_data["weights_before"][k].round(4)}
      %td #{"%+.3f"%pairwise_ranking_data["weights_after"][k].round(4)}
      - diff = pairwise_ranking_data["weights_after"][k]-pairwise_ranking_data["weights_before"][k]
      - if diff < 0
        %td.red #{"%+.3f"%diff.round(4)}
      - elsif diff > 0
        %td.green #{"%+.3f"%diff.round(4)}
      - else
        %td #{"%+.3f"%diff.round(4)}
      - if !k.start_with? "R:"
        %td #{"%+.1f"%(diff/pairwise_ranking_data["learning_rate"]).round(2)}
      - else
        %td #{"%+.1f"%(diff/pairwise_ranking_data["learning_rate_sparse"]).round(2)}
%h3 Features explained
%table
  %tr
    %td.noborder EgivenFCoherent (rule)
    %td.left -log10[ c(e, f) / sample c(f) ]
  %tr
    %td.noborder NewRule (rule)
    %td.left Only feature of additional rules, weight fixed at 1
  %tr
    %td.noborder KnownRule (rule)
    %td.left Added to existing rules if they could be extracted from previous post-edits
  %tr
    %td.noborder OOVFix (rule)
    %td.left Manually added rules to fix OOV words
  %tr
    %td.noborder Glue
    %td.left Absolute number of rules used from the glue grammar
  %tr
    %td.noborder IsSingletonF/E (rule)
    %td.left true|false (1|0) (sum)
  %tr
    %td.noborder IsSingletonFE (rule)
    %td.left true|false (1|0) (sum)
  %tr
    %td.noborder LanguageModel
    %td.left -log10[ score ]
  %tr
    %td.noborder LanguageModel_OOV
    %td.left Absolute count of OOV unigrams
  %tr
    %td.noborder MaxLexFgivenE (rule)
    %td.left Sum_f -log10(maxScore) (maxScore = max_e(ttable(f)))
  %tr
    %td.noborder MaxLexEgivenF (rule)
    %td.left Sum_e -log10(maxScore) (maxScore = max_f(ttable(e)))
  %tr
    %td.noborder PassThrough
    %td.left Absolute count of used PassThrough rules (1 per word)
  %tr
    %td.noborder SampleCountF (rule)
    %td.left log10 [ sample c(f) ]
  %tr
    %td.noborder WordPenalty
    %td.left -log10(e) * |e| = -(1/ln(10)) * |e| = -0.43429448190325176 * |e|
  %tr
    %td.noborder SourceWordPenalty (per edge/rule)
    %td.left Same as WordPenalty, but with |f| instead of |e|
  %tr
    %td.noborder R:*
    %td.left Rule indicator features, summed over the full derivation per rule
  %tr
    %td.noborder Shape_*
    %td.left Indicator features for rule shapes
  %tr
    %td.noborder IsSupportedOnline
    %td.left Counts how many rules have support from the local context (Denkowski)
%p
  %a{ :href => "#" } ^ up
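-# Worked example for the WordPenalty row above (length value assumed for illustration):
-#   |e| = 10 tokens  =>  WordPenalty = -0.43429448190325176 * 10 = -4.3429448190325176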