#include <cassert>   // for assert() in read_callback
#include <sstream>
#include <iostream>
#include <set>
#include <utility>   // for std::pair

#include "contexts_corpus.hh"
#include "gzstream.hh"
#include "contexts_lexer.h"

using namespace std;

//////////////////////////////////////////////////
// ContextsCorpus
//////////////////////////////////////////////////
// Callback invoked by ContextsLexer::ReadContexts once per phrase. It turns
// the phrase's contexts into a Document (one token per context occurrence)
// and, if a BackoffGenerator was supplied, extends the corpus backoff map
// with progressively more general versions of each context.
void read_callback(const ContextsLexer::PhraseContextsType& new_contexts, void* extra) {
  assert(new_contexts.contexts.size() == new_contexts.counts.size());

  std::pair<ContextsCorpus*, BackoffGenerator*>* extra_pair
    = static_cast< std::pair<ContextsCorpus*, BackoffGenerator*>* >(extra);
  ContextsCorpus* corpus_ptr = extra_pair->first;
  BackoffGenerator* backoff_gen = extra_pair->second;

  Document* doc(new Document());

  //std::cout << "READ: " << new_contexts.phrase << "\t";

  for (size_t i = 0; i < new_contexts.contexts.size(); ++i) {
    // Convert() may grow the dictionary; comparing max() before and after
    // tells us whether this context is a previously unseen type.
    int cache_word_count = corpus_ptr->m_dict.max();
    WordID id = corpus_ptr->m_dict.Convert(new_contexts.contexts[i]);
    if (cache_word_count != corpus_ptr->m_dict.max()) {
      corpus_ptr->m_backoff->terms_at_level(0)++;
      corpus_ptr->m_num_types++;
    }

    // Add one token to the document for each occurrence of this context.
    int count = new_contexts.counts[i];
    for (int j = 0; j < count; ++j)
      doc->push_back(id);
    corpus_ptr->m_num_terms += count;

    // Generate the backoff map: repeatedly back the context off until we
    // reach a context that already has a backoff entry, or the context
    // backs off to nothing (recorded as -1).
    if (backoff_gen) {
      int order = 1;
      WordID backoff_id = id;
      ContextsLexer::Context backedoff_context = new_contexts.contexts[i];
      while (!corpus_ptr->m_backoff->has_backoff(backoff_id)) {
        //std::cerr << "Backing off from " << corpus_ptr->m_dict.Convert(backoff_id) << " to ";
        backedoff_context = (*backoff_gen)(backedoff_context);

        if (backedoff_context.empty()) {
          //std::cerr << "Nothing." << std::endl;
          (*corpus_ptr->m_backoff)[backoff_id] = -1;
          break;
        }

        // Track the deepest backoff level seen so far.
        if (++order > corpus_ptr->m_backoff->order())
          corpus_ptr->m_backoff->order(order);

        // Same new-type detection trick as above, at the backed-off level.
        int cache_word_count = corpus_ptr->m_dict.max();
        int new_backoff_id = corpus_ptr->m_dict.Convert(backedoff_context);
        if (cache_word_count != corpus_ptr->m_dict.max())
          corpus_ptr->m_backoff->terms_at_level(order-1)++;

        //std::cerr << corpus_ptr->m_dict.Convert(new_backoff_id) << " ." << std::endl;

        backoff_id = ((*corpus_ptr->m_backoff)[backoff_id] = new_backoff_id);
      }
    }
    //std::cout << context_str << " (" << id << ") ||| C=" << count << " ||| ";
  }
  //std::cout << std::endl;

  corpus_ptr->m_documents.push_back(doc);
}
// Reads a gzipped contexts file into the corpus and returns the number of
// documents (phrases) read. backoff_gen_ptr may be null, in which case no
// backoff map is generated.
unsigned ContextsCorpus::read_contexts(const std::string &filename,
                                       BackoffGenerator* backoff_gen_ptr) {
  m_num_terms = 0;
  m_num_types = 0;

  igzstream in(filename.c_str());
  std::pair<ContextsCorpus*, BackoffGenerator*> extra_pair(this, backoff_gen_ptr);
  ContextsLexer::ReadContexts(&in, read_callback, &extra_pair);

  //m_num_types = m_dict.max();

  // Report the backoff hierarchy that was built.
  std::cerr << "Read backoff with order " << m_backoff->order() << "\n";
  for (int o = 0; o < m_backoff->order(); o++)
    std::cerr << "  Terms at " << o << " = " << m_backoff->terms_at_level(o) << std::endl;
  std::cerr << std::endl;

  return m_documents.size();
}
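
// Usage sketch (illustrative only). `ExampleBackoffGenerator` and the
// filename are hypothetical: substitute whichever concrete BackoffGenerator
// subclass the codebase provides, or pass a null pointer to disable backoff
// generation.
//
//   ContextsCorpus corpus;
//   ExampleBackoffGenerator backoff_gen;  // hypothetical generator
//   unsigned num_docs = corpus.read_contexts("contexts.txt.gz", &backoff_gen);
//   std::cerr << "Read " << num_docs << " documents." << std::endl;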