author     Kenneth Heafield <github@kheafield.com>   2014-01-27 17:42:19 -0800
committer  Kenneth Heafield <github@kheafield.com>   2014-01-27 17:42:19 -0800
commit     eecc4cbf5a6e28f910130cd4a58444e9d4701ea9 (patch)
tree       cb32ace0bef145fd797c1d43f21768b111dbbcab /klm/util/sized_iterator.hh
parent     5ab6eb44d67a48ea5b366d8b2878f3da7ef960e4 (diff)

KenLM 5cc905bc2d214efa7de2db56a9a672b749a95591
Diffstat (limited to 'klm/util/sized_iterator.hh')
-rw-r--r--  klm/util/sized_iterator.hh | 14 ++------------
1 file changed, 2 insertions(+), 12 deletions(-)
diff --git a/klm/util/sized_iterator.hh b/klm/util/sized_iterator.hh
index dce8f229..a72657b5 100644
--- a/klm/util/sized_iterator.hh
+++ b/klm/util/sized_iterator.hh
@@ -36,7 +36,7 @@ class SizedInnerIterator {
void *Data() { return ptr_; }
std::size_t EntrySize() const { return size_; }
- friend inline void swap(SizedInnerIterator &first, SizedInnerIterator &second) {
+ friend void swap(SizedInnerIterator &first, SizedInnerIterator &second) {
std::swap(first.ptr_, second.ptr_);
std::swap(first.size_, second.size_);
}
@@ -69,17 +69,7 @@ class SizedProxy {
const void *Data() const { return inner_.Data(); }
void *Data() { return inner_.Data(); }
- /**
- // TODO: this (deep) swap was recently added. why? if any std heap sort etc
- // algs are using swap, that's going to be worse performance than using
- // =. i'm not sure why we *want* a deep swap. if C++11 compilers are
- // choosing between move constructor and swap, then we'd better implement a
- // (deep) move constructor. it may also be that this is moot since i made
- // ProxyIterator a reference and added a shallow ProxyIterator swap? (I
- // need Ken or someone competent to judge whether that's correct also. -
- // let me know at graehl@gmail.com
- */
- friend void swap(SizedProxy &first, SizedProxy &second) {
+ friend void swap(SizedProxy first, SizedProxy second) {
std::swap_ranges(
static_cast<char*>(first.inner_.Data()),
static_cast<char*>(first.inner_.Data()) + first.inner_.EntrySize(),
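The notable change in the second hunk is that swap over SizedProxy now takes its arguments by value rather than by reference, and the TODO comment questioning the deep swap is dropped. Below is a minimal sketch of that pattern using an illustrative ByteProxy type rather than KenLM's actual classes: the proxy is only a pointer-plus-size handle, so copying it is cheap, while the swap itself exchanges the bytes the proxies point at via std::swap_ranges, which is what lets standard algorithms swap entries through temporary proxies returned by a proxy iterator.

// Minimal illustrative sketch (not KenLM's actual classes): a ByteProxy that
// refers to an entry in external storage.  Passing the proxy to swap() by
// value is cheap because the proxy holds only a pointer and a size, while the
// swap itself is "deep": std::swap_ranges exchanges the bytes the two proxies
// point at, not the proxy objects themselves.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>

struct ByteProxy {                 // hypothetical stand-in for SizedProxy
  char *ptr;                       // start of the entry this proxy refers to
  std::size_t size;                // entry size in bytes

  // By-value parameters mean temporaries returned by a proxy iterator's
  // operator* can still be swapped; the underlying entries are exchanged.
  friend void swap(ByteProxy first, ByteProxy second) {
    std::swap_ranges(first.ptr, first.ptr + first.size, second.ptr);
  }
};

int main() {
  char buf[8] = {'a','b','c','d','e','f','g','h'};
  ByteProxy x{buf, 4}, y{buf + 4, 4};
  swap(x, y);                      // deep-swaps the two 4-byte entries in buf
  std::cout << std::string(buf, 8) << '\n';   // prints "efghabcd"
}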