Diffstat (limited to 'extractor')
-rw-r--r--  extractor/precomputation.cc        | 2 --
-rw-r--r--  extractor/rule_extractor_helper.cc | 2 --
2 files changed, 0 insertions, 4 deletions
diff --git a/extractor/precomputation.cc b/extractor/precomputation.cc
index e29018c2..0fadc95c 100644
--- a/extractor/precomputation.cc
+++ b/extractor/precomputation.cc
@@ -69,8 +69,6 @@ vector<vector<int> > Precomputation::FindMostFrequentPatterns(
   for (size_t i = 1; i < lcp.size(); ++i) {
     for (int len = lcp[i]; len < max_frequent_phrase_len; ++len) {
       int frequency = i - run_start[len];
-      // TODO(pauldb): Only add patterns that don't span across multiple
-      // sentences.
       if (frequency >= min_frequency) {
         heap.push(make_pair(frequency,
             make_pair(suffix_array->GetSuffix(run_start[len]), len + 1)));
diff --git a/extractor/rule_extractor_helper.cc b/extractor/rule_extractor_helper.cc
index 553b56d4..d9ed6a7e 100644
--- a/extractor/rule_extractor_helper.cc
+++ b/extractor/rule_extractor_helper.cc
@@ -35,8 +35,6 @@ void RuleExtractorHelper::GetLinksSpans(
   source_low = vector<int>(source_sent_len, -1);
   source_high = vector<int>(source_sent_len, -1);
-  // TODO(pauldb): Adam Lopez claims this part is really inefficient. See if we
-  // can speed it up.
   target_low = vector<int>(target_sent_len, -1);
   target_high = vector<int>(target_sent_len, -1);
   vector<pair<int, int> > links = alignment->GetLinks(sentence_id);
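Context for the first hunk: the loop walks the corpus's LCP array and, whenever a run of suffixes sharing a prefix of a given length ends, pushes that phrase onto a heap if the run (i.e. the phrase's frequency) is long enough. The removed TODO noted that such phrases may still straddle sentence boundaries. The following is a minimal, self-contained C++ sketch of the same LCP-run idea under stated assumptions (naive suffix array, toy corpus, no flush of runs reaching the end of the array); it is not cdec's Precomputation API, and all names in it are hypothetical.

// Toy sketch of the LCP-run loop touched by the first hunk.
// run_start[len] marks where the current run of suffixes sharing a prefix of
// length len+1 began; when the run ends, its length is the phrase's frequency.
#include <algorithm>
#include <iostream>
#include <queue>
#include <utility>
#include <vector>

int main() {
  // Toy corpus of word ids; 0 stands in for a sentence separator, echoing the
  // removed TODO's concern that phrases can straddle it.
  std::vector<int> data = {1, 2, 3, 1, 2, 0, 1, 2, 3};
  const int max_frequent_phrase_len = 3;
  const int min_frequency = 2;

  // Naive O(n^2 log n) suffix array, for illustration only.
  std::vector<int> sa(data.size());
  for (int i = 0; i < (int)sa.size(); ++i) sa[i] = i;
  std::sort(sa.begin(), sa.end(), [&](int a, int b) {
    return std::lexicographical_compare(data.begin() + a, data.end(),
                                        data.begin() + b, data.end());
  });

  // LCP between each suffix and its predecessor in sorted order.
  std::vector<int> lcp(sa.size(), 0);
  for (int i = 1; i < (int)sa.size(); ++i) {
    int a = sa[i - 1], b = sa[i], l = 0;
    while (a + l < (int)data.size() && b + l < (int)data.size() &&
           data[a + l] == data[b + l]) ++l;
    lcp[i] = l;
  }

  // Same structure as the hunk: close runs when the LCP drops below len+1.
  // (Runs that reach the end of the array are not flushed in this toy.)
  std::vector<int> run_start(max_frequent_phrase_len, 0);
  std::priority_queue<std::pair<int, std::pair<int, int> > > heap;
  for (int i = 1; i < (int)lcp.size(); ++i) {
    for (int len = lcp[i]; len < max_frequent_phrase_len; ++len) {
      int frequency = i - run_start[len];
      if (frequency >= min_frequency) {
        heap.push(std::make_pair(frequency,
            std::make_pair(sa[run_start[len]], len + 1)));
      }
      run_start[len] = i;
    }
  }

  // Most frequent phrases pop first: (frequency, (start position, length)).
  while (!heap.empty()) {
    std::cout << "start=" << heap.top().second.first
              << " len=" << heap.top().second.second
              << " freq=" << heap.top().first << "\n";
    heap.pop();
  }
  return 0;
}

The real Precomputation::FindMostFrequentPatterns applies the same run-closing logic over cdec's SuffixArray, which is why the sentence-spanning TODO sat inside this loop before this commit removed it.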