Implement getBytes()

2024-05-10 12:10:00 -07:00
parent c65d874c07
commit f5920ba6c7
5 changed files with 469 additions and 36 deletions

View File

@@ -1,2 +1,2 @@
CompileFlags:
Add: [-DENABLE_MAIN, -UNDEBUG, -DENABLE_FUZZ, -DTHREAD_TEST, -fexceptions, -DDEBUG_VERBOSE=1, -DENABLE_ROOTSET_TESTS]
Add: [-DENABLE_MAIN, -UNDEBUG, -DENABLE_FUZZ, -DTHREAD_TEST, -fexceptions, -DDEBUG_VERBOSE=1, -DSHOW_MEMORY=1, -DENABLE_ROOTSET_TESTS]

Internal.h Normal file (388 lines added)
View File

@@ -0,0 +1,388 @@
#pragma once
#include <new>
#include <algorithm>
#include <assert.h>
#include <bit>
#include <span>
#include <stdlib.h>
#include <string.h>
#ifndef SHOW_MEMORY
#define SHOW_MEMORY 0
#endif
#ifndef DEBUG_VERBOSE
#define DEBUG_VERBOSE 0
#endif
#if DEBUG_VERBOSE
// Used to toggle verbose debug output dynamically
inline bool debugVerboseEnabled = true;
#endif
// This header contains code that we want to reuse outside of ConflictSet.cpp,
// or that we want to exclude from coverage since it's only testing-related.
// GCOVR_EXCL_START
#if SHOW_MEMORY
inline int64_t mallocBytes = 0;
inline int64_t peakMallocBytes = 0;
#endif
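// Per-thread running total of bytes allocated minus bytes freed through
// safe_malloc/safe_calloc/safe_free; callers can zero it before an operation
// and read it afterwards to attribute allocations to that operation.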
inline thread_local int64_t mallocBytesDelta = 0;
#ifndef NDEBUG
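// In debug builds every allocation is prefixed with a small header that
// records the requested size, so safe_free can assert that the caller passed
// back the same size it originally asked for (16 bytes presumably keeps the
// returned pointer aligned for max_align_t).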
constexpr auto kMallocHeaderSize = 16;
#endif
// malloc that aborts on OOM and thus always returns a non-null pointer. Must be
// paired with `safe_free`.
__attribute__((always_inline)) inline void *safe_malloc(size_t s) {
mallocBytesDelta += s;
#if SHOW_MEMORY
mallocBytes += s;
if (mallocBytes > peakMallocBytes) {
peakMallocBytes = mallocBytes;
}
#endif
void *p = malloc(s
#ifndef NDEBUG
+ kMallocHeaderSize
#endif
);
if (p == nullptr) {
abort();
}
#ifndef NDEBUG
memcpy(p, &s, sizeof(s));
(char *&)p += kMallocHeaderSize;
#endif
return p;
}
// calloc that aborts on OOM and thus always returns a non-null pointer. Must be
// paired with `safe_free`.
__attribute__((always_inline)) inline void *safe_calloc(size_t count,
size_t size) {
size_t s = count * size;
mallocBytesDelta += s;
#if SHOW_MEMORY
mallocBytes += s;
if (mallocBytes > peakMallocBytes) {
peakMallocBytes = mallocBytes;
}
#endif
void *p = calloc(s
#ifndef NDEBUG
+ kMallocHeaderSize
#endif
,
1);
if (p == nullptr) {
abort();
}
#ifndef NDEBUG
memcpy(p, &s, sizeof(s));
(char *&)p += kMallocHeaderSize;
#endif
return p;
}
// Must be paired with `safe_malloc` or `safe_calloc`.
//
// There's nothing safer about this than plain free; it's only called safe_free
// for symmetry with safe_malloc.
__attribute__((always_inline)) inline void safe_free(void *p, size_t s) {
mallocBytesDelta -= s;
#if SHOW_MEMORY
mallocBytes -= s;
#endif
#ifndef NDEBUG
(char *&)p -= kMallocHeaderSize;
size_t expected;
memcpy(&expected, p, sizeof(expected));
assert(s == expected);
#endif
free(p);
}
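// Usage sketch (the Blob type is purely illustrative): the caller is
// responsible for remembering the requested size and passing it back on free:
//
//   struct Blob { int64_t len; };
//   auto *b = (Blob *)safe_malloc(sizeof(Blob));
//   // ... use b ...
//   safe_free(b, sizeof(Blob)); // must match the size originally requested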
// ==================== BEGIN ARENA IMPL ====================
/// Group allocations with similar lifetimes to amortize the cost of malloc/free
struct Arena {
explicit Arena(int initialSize = 0);
/// O(log n) in the number of allocations
~Arena();
struct ArenaImpl;
Arena(const Arena &) = delete;
Arena &operator=(const Arena &) = delete;
Arena(Arena &&other) noexcept;
Arena &operator=(Arena &&other) noexcept;
ArenaImpl *impl = nullptr;
};
[[maybe_unused]] inline void operator delete(void *, std::align_val_t,
Arena &) {}
inline void *operator new(size_t size, std::align_val_t align, Arena &arena);
void *operator new(size_t size, std::align_val_t align, Arena *arena) = delete;
[[maybe_unused]] inline void operator delete(void *, Arena &) {}
inline void *operator new(size_t size, Arena &arena) {
return operator new(size, std::align_val_t(alignof(std::max_align_t)), arena);
}
inline void *operator new(size_t size, Arena *arena) = delete;
[[maybe_unused]] inline void operator delete[](void *, Arena &) {}
inline void *operator new[](size_t size, Arena &arena) {
return operator new(size, arena);
}
inline void *operator new[](size_t size, Arena *arena) = delete;
[[maybe_unused]] inline void operator delete[](void *, std::align_val_t,
Arena &) {}
inline void *operator new[](size_t size, std::align_val_t align, Arena &arena) {
return operator new(size, align, arena);
}
inline void *operator new[](size_t size, std::align_val_t align,
Arena *arena) = delete;
/// align must be a power of two
template <class T> T *align_up(T *t, size_t align) {
assert(std::popcount(align) == 1);
auto unaligned = uintptr_t(t);
auto aligned = (unaligned + align - 1) & ~(align - 1);
return reinterpret_cast<T *>(reinterpret_cast<char *>(t) + aligned -
unaligned);
}
/// align must be a power of two
constexpr inline int align_up(uint32_t unaligned, uint32_t align) {
assert(std::popcount(align) == 1);
return (unaligned + align - 1) & ~(align - 1);
}
/// Returns the smallest power of two >= x
[[maybe_unused]] constexpr inline uint32_t nextPowerOfTwo(uint32_t x) {
return x <= 1 ? 1 : 1 << (32 - std::countl_zero(x - 1));
}
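// Worked examples: align_up(13u, 8u) == 16, align_up(16u, 8u) == 16,
// nextPowerOfTwo(5) == 8, and nextPowerOfTwo(8) == 8.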
struct Arena::ArenaImpl {
Arena::ArenaImpl *prev;
int capacity;
int used;
uint8_t *begin() { return reinterpret_cast<uint8_t *>(this + 1); }
};
inline Arena::Arena(int initialSize) : impl(nullptr) {
if (initialSize > 0) {
auto allocationSize =
align_up(initialSize + sizeof(ArenaImpl), alignof(ArenaImpl));
impl = (Arena::ArenaImpl *)safe_malloc(allocationSize);
impl->prev = nullptr;
impl->capacity = allocationSize - sizeof(ArenaImpl);
impl->used = 0;
}
}
inline void onDestroy(Arena::ArenaImpl *impl) {
while (impl) {
auto *prev = impl->prev;
safe_free(impl, sizeof(Arena::ArenaImpl) + impl->capacity);
impl = prev;
}
}
[[maybe_unused]] inline Arena::Arena(Arena &&other) noexcept
: impl(std::exchange(other.impl, nullptr)) {}
[[maybe_unused]] inline Arena &Arena::operator=(Arena &&other) noexcept {
onDestroy(impl);
impl = std::exchange(other.impl, nullptr);
return *this;
}
inline Arena::~Arena() { onDestroy(impl); }
inline void *operator new(size_t size, std::align_val_t align, Arena &arena) {
int64_t aligned_size = size + size_t(align) - 1;
if (arena.impl == nullptr ||
(arena.impl->capacity - arena.impl->used) < aligned_size) {
auto allocationSize = align_up(
sizeof(Arena::ArenaImpl) +
std::max<int>(aligned_size,
(arena.impl ? std::max<int>(sizeof(Arena::ArenaImpl),
arena.impl->capacity * 2)
: 0)),
alignof(Arena::ArenaImpl));
auto *impl = (Arena::ArenaImpl *)safe_malloc(allocationSize);
impl->prev = arena.impl;
impl->capacity = allocationSize - sizeof(Arena::ArenaImpl);
impl->used = 0;
arena.impl = impl;
}
auto *result =
align_up(arena.impl->begin() + arena.impl->used, size_t(align));
auto usedDelta = (result - arena.impl->begin()) + size - arena.impl->used;
arena.impl->used += usedDelta;
return result;
}
/// STL-friendly allocator using an arena
template <class T> struct ArenaAlloc {
typedef T value_type;
ArenaAlloc() = delete;
explicit ArenaAlloc(Arena *arena) : arena(arena) {}
Arena *arena;
template <class U> constexpr ArenaAlloc(const ArenaAlloc<U> &other) noexcept {
arena = other.arena;
}
[[nodiscard]] T *allocate(size_t n) {
if (n > 0xfffffffffffffffful / sizeof(T)) { // NOLINT
__builtin_unreachable();
}
return static_cast<T *>((void *)new (std::align_val_t(alignof(T)), *arena)
uint8_t[n * sizeof(T)]); // NOLINT
}
void deallocate(T *, size_t) noexcept {}
};
template <class T, class U>
bool operator==(const ArenaAlloc<T> &lhs, const ArenaAlloc<U> &rhs) {
return lhs.arena == rhs.arena;
}
template <class T, class U>
bool operator!=(const ArenaAlloc<T> &lhs, const ArenaAlloc<U> &rhs) {
return !(lhs == rhs);
}
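// Usage sketch (names are illustrative only): everything allocated from an
// arena is released in one shot when the Arena is destroyed, and blocks grow
// geometrically so many small allocations amortize to a handful of mallocs.
//
//   {
//     Arena arena;
//     auto *buf = new (arena) uint8_t[256];  // lives until the arena dies
//     ArenaAlloc<int> alloc(&arena);
//     int *ints = alloc.allocate(4);         // usable as an STL allocator too
//   } // all of the above is freed here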
// ==================== END ARENA IMPL ====================
// ==================== BEGIN ARBITRARY IMPL ====================
/// Think of `Arbitrary` as an attacker-controlled random number generator.
/// Usually you want your random number generator to be fair, so that you can
/// sensibly analyze probabilities. E.g. the analysis showing that quicksort
/// runs in expected O(n log n) time with a random pivot relies on the pivot
/// being drawn uniformly from a fair distribution.
///
/// Other times you want your randomness to be diabolically unfair, like when
/// looking for bugs and fuzzing. The random-number-like interface is still
/// convenient here, but you can potentially get much better coverage by
/// allowing the possibility of e.g. flipping heads 100 times in a row.
///
/// When it runs out of entropy, it always returns 0.
struct Arbitrary {
Arbitrary() = default;
explicit Arbitrary(std::span<const uint8_t> bytecode) : bytecode(bytecode) {}
/// Draws an arbitrary uint32_t
uint32_t next() { return consume<4>(); }
/// Draws an arbitrary element from [0, s)
uint32_t bounded(uint32_t s);
/// Fill `bytes` with `size` arbitrary bytes
void randomBytes(uint8_t *bytes, int size) {
int toFill = std::min<int>(size, bytecode.size());
if (toFill > 0) {
memcpy(bytes, bytecode.data(), toFill);
}
bytecode = bytecode.subspan(toFill, bytecode.size() - toFill);
memset(bytes + toFill, 0, size - toFill);
}
/// Fill `bytes` with `size` arbitrary hex characters ('0'-'9', 'a'-'f')
void randomHex(uint8_t *bytes, int size) {
for (int i = 0; i < size;) {
uint8_t arbitrary = consume<1>();
bytes[i++] = "0123456789abcdef"[arbitrary & 0xf];
arbitrary >>= 4;
if (i < size) {
bytes[i++] = "0123456789abcdef"[arbitrary & 0xf];
}
}
}
template <class T, class = std::enable_if_t<std::is_trivially_copyable_v<T>>>
T randT() {
T t;
randomBytes((uint8_t *)&t, sizeof(T));
return t;
}
bool hasEntropy() const { return bytecode.size() != 0; }
private:
uint8_t consumeByte() {
if (bytecode.size() == 0) {
return 0;
}
auto result = bytecode[0];
bytecode = bytecode.subspan(1, bytecode.size() - 1);
return result;
}
template <int kBytes> uint32_t consume() {
uint32_t result = 0;
static_assert(kBytes <= 4);
for (int i = 0; i < kBytes; ++i) {
result <<= 8;
result |= consumeByte();
}
return result;
}
std::span<const uint8_t> bytecode;
};
inline uint32_t Arbitrary::bounded(uint32_t s) {
if (s == 1) {
return 0;
}
switch (32 - std::countl_zero(s - 1)) {
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
case 8:
return consume<1>() % s;
case 9:
case 10:
case 11:
case 12:
case 13:
case 14:
case 15:
case 16:
return consume<2>() % s;
case 17:
case 18:
case 19:
case 20:
case 21:
case 22:
case 23:
case 24:
return consume<3>() % s;
default:
return consume<4>() % s;
}
}
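// Usage sketch (the input bytes are illustrative): a fuzzer passes its raw
// input in as `bytecode`, and the code under test draws its "random" choices
// from it; once the entropy is exhausted every draw comes back as 0.
//
//   const uint8_t input[] = {0x12, 0x34, 0x56};
//   Arbitrary arb{std::span<const uint8_t>(input, 3)};
//   uint32_t roll = arb.bounded(6); // consumes one byte: 0x12 % 6 == 0
//   uint32_t rest = arb.next();     // 0x34560000: remaining bytes, zero-padded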
// ==================== END ARBITRARY IMPL ====================
// GCOVR_EXCL_STOP

View File

@@ -1,4 +1,5 @@
#include "RootSet.h"
#include "Internal.h"
#include <assert.h>
#include <inttypes.h>
@@ -9,10 +10,13 @@
struct RootSet::ThreadSafeHandle::Impl {
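// Impl is laid out as this header followed by `capacity` int64_t entries and
// `capacity` uint32_t entries; keeping the size formula in one place lets the
// safe_free calls below hand back exactly the size that was allocated.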
static int sizeForCapacity(int capacity) {
return sizeof(Impl) + sizeof(int64_t) * capacity +
sizeof(uint32_t) * capacity;
}
static Impl *create(int capacity) {
int size =
sizeof(Impl) + sizeof(int64_t) * capacity + sizeof(uint32_t) * capacity;
auto *result = (Impl *)malloc(size);
auto *result = (Impl *)safe_malloc(sizeForCapacity(capacity));
result->capacity = capacity;
return result;
}
@@ -62,9 +66,10 @@ struct RootSet::Impl {
auto *tmp = i;
i = i->next;
free(tmp);
safe_free(tmp, ThreadSafeHandle::Impl::sizeForCapacity(tmp->capacity));
}
free(handle.load(std::memory_order_relaxed));
auto h = handle.load(std::memory_order_relaxed);
safe_free(h, ThreadSafeHandle::Impl::sizeForCapacity(h->capacity));
}
void add(uint32_t node, int64_t version) {
@@ -116,7 +121,7 @@ struct RootSet::Impl {
1] <= oldestVersion) {
auto *tmp = firstToFree;
firstToFree = firstToFree->next;
free(tmp);
safe_free(tmp, ThreadSafeHandle::Impl::sizeForCapacity(tmp->capacity));
}
}
@@ -164,11 +169,11 @@ RootSet::ThreadSafeHandle RootSet::getThreadSafeHandle() const {
const uint32_t *RootSet::roots() const { return impl->roots(); }
int RootSet::rootCount() const { return impl->rootCount(); }
RootSet::RootSet() : impl(new(malloc(sizeof(Impl))) Impl()) {}
RootSet::RootSet() : impl(new(safe_malloc(sizeof(Impl))) Impl()) {}
RootSet::~RootSet() {
impl->~Impl();
free(impl);
safe_free(impl, sizeof(*impl));
}
#ifdef ENABLE_ROOTSET_TESTS

View File

@@ -1,4 +1,5 @@
#include "VersionedMap.h"
#include "Internal.h"
#include "RootSet.h"
#include <assert.h>
@@ -13,15 +14,6 @@
#include <unordered_set>
#include <xxhash.h>
#ifndef DEBUG_VERBOSE
#define DEBUG_VERBOSE 0
#endif
#if DEBUG_VERBOSE
// Use to toggle debug verbose dynamically
bool debugVerboseEnabled = true;
#endif
static_assert(std::is_standard_layout_v<weaselab::VersionedMap::MutationType>);
static_assert(std::is_standard_layout_v<weaselab::VersionedMap::Key>);
static_assert(std::is_standard_layout_v<weaselab::VersionedMap::Mutation>);
@@ -126,14 +118,15 @@ struct Entry {
}
#endif
if (--refCount == 0) {
free((void *)this);
safe_free((void *)this, sizeof(Entry) + keyLen + 1 + std::max(valLen, 0));
}
}
static Entry *make(int64_t pointVersion, int64_t rangeVersion,
const uint8_t *key, int keyLen, const uint8_t *val,
int valLen) {
auto e = (Entry *)malloc(sizeof(Entry) + keyLen + 1 + std::max(valLen, 0));
auto e =
(Entry *)safe_malloc(sizeof(Entry) + keyLen + 1 + std::max(valLen, 0));
e->pointVersion = pointVersion;
e->rangeVersion = rangeVersion;
e->keyLen = keyLen;
@@ -176,7 +169,7 @@ static_assert(kUpsizeNodes * sizeof(Node) == kUpsizeBytes);
struct BitSet {
explicit BitSet(uint32_t size)
: words((uint64_t *)calloc(size / 64 + 1, 8)) {}
: words((uint64_t *)safe_calloc(size / 64 + 1, 8)), size(size) {}
bool test(uint32_t i) const {
return words[i >> 6] & (uint64_t(1) << (i & 63));
@@ -207,13 +200,17 @@ struct BitSet {
}
}
~BitSet() { free(words); }
~BitSet() { safe_free(words, (size / 64 + 1) * 8); }
private:
uint32_t max_ = 0;
uint64_t *const words;
const uint32_t size;
};
int64_t mmapBytes = 0;
int64_t peakMmapBytes = 0;
struct MemManager {
MemManager()
: base((Node *)mmapSafe(nullptr, kMapSize, PROT_NONE,
@@ -248,6 +245,10 @@ struct MemManager {
mprotectSafe(base + firstUnaddressable, kUpsizeBytes,
PROT_READ | PROT_WRITE);
firstUnaddressable += kUpsizeNodes;
#if SHOW_MEMORY
mmapBytes = getBytes();
peakMmapBytes = std::max(peakMmapBytes, mmapBytes);
#endif
if (firstUnaddressable > kMapSize / sizeof(Node)) {
fprintf( // GCOVR_EXCL_LINE
stderr, // GCOVR_EXCL_LINE
@@ -344,6 +345,9 @@ struct MemManager {
(firstUnaddressable - newFirstUnaddressable) * sizeof(Node),
PROT_NONE);
firstUnaddressable = newFirstUnaddressable;
#if SHOW_MEMORY
mmapBytes = getBytes();
#endif
}
next = max + 1;
@@ -366,6 +370,10 @@ struct MemManager {
kMinAddressable, next);
}
int64_t getBytes() const {
return (firstUnaddressable - kMinAddressable) * sizeof(Node);
}
private:
uint32_t next = kMinAddressable;
uint32_t firstUnaddressable = kMinAddressable;
@@ -764,17 +772,22 @@ struct VersionedMap::Impl {
}
void setOldestVersion(int64_t oldestVersion) {
mallocBytesDelta = 0;
this->oldestVersion = oldestVersion;
roots.setOldestVersion(oldestVersion);
mm.gc(roots.roots(), roots.rootCount(), oldestVersion);
totalMallocBytes += mallocBytesDelta;
}
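// totalMallocBytes accumulates the per-operation mallocBytesDelta recorded
// around each mutating operation, while mm.getBytes() reports the currently
// addressable part of the mmap-backed node region.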
int64_t getBytes() const { return totalMallocBytes + mm.getBytes(); }
void printInOrder(int64_t version);
void printInOrderHelper(int64_t version, uint32_t node, int depth);
void addMutations(const Mutation *mutations, int numMutations,
int64_t version) {
mallocBytesDelta = 0;
// TODO scan to remove mutations older than oldestVersion
assert(latestVersion < version);
latestVersion = version;
@@ -805,6 +818,7 @@ struct VersionedMap::Impl {
}
}
roots.add(latestRoot, latestVersion);
totalMallocBytes += mallocBytesDelta;
}
void firstGeq(const Key *key, const int64_t *version, Iterator *iterator,
@@ -816,17 +830,25 @@ struct VersionedMap::Impl {
uint32_t latestRoot;
int64_t oldestVersion = 0;
int64_t latestVersion = 0;
int64_t totalMallocBytes = sizeof(Impl);
};
VersionedMap::VersionedMap(int64_t version)
: impl(new(malloc(sizeof(Impl))) Impl()) {
impl->latestVersion = version;
VersionedMap::Impl *internal_makeImpl(int64_t version) {
mallocBytesDelta = 0;
auto *result =
new (safe_malloc(sizeof(VersionedMap::Impl))) VersionedMap::Impl();
result->totalMallocBytes = mallocBytesDelta;
result->latestVersion = version;
return result;
}
VersionedMap::VersionedMap(int64_t version)
: impl(internal_makeImpl(version)) {}
VersionedMap::~VersionedMap() {
if (impl != nullptr) {
impl->~Impl();
free(impl);
safe_free(impl, sizeof(*impl));
}
}
@@ -864,20 +886,20 @@ struct VersionedMap::Iterator::Impl {
VersionedMap::Iterator::~Iterator() {
if (impl != nullptr) {
impl->~Impl();
free(impl);
safe_free(impl, sizeof(*impl));
}
}
VersionedMap::Iterator::Iterator(const Iterator &other)
: impl(new(malloc(sizeof(Impl))) Impl(*other.impl)) {}
: impl(new(safe_malloc(sizeof(Impl))) Impl(*other.impl)) {}
VersionedMap::Iterator &
VersionedMap::Iterator::operator=(const Iterator &other) {
if (impl != nullptr) {
impl->~Impl();
free(impl);
safe_free(impl, sizeof(*impl));
}
impl = new (malloc(sizeof(Impl))) Impl(*other.impl);
impl = new (safe_malloc(sizeof(Impl))) Impl(*other.impl);
return *this;
}
@@ -888,7 +910,7 @@ VersionedMap::Iterator &
VersionedMap::Iterator::operator=(Iterator &&other) noexcept {
if (impl != nullptr) {
impl->~Impl();
free(impl);
safe_free(impl, sizeof(*impl));
}
impl = std::exchange(other.impl, nullptr);
return *this;
@@ -1033,7 +1055,8 @@ void VersionedMap::Impl::firstGeq(const Key *key, const int64_t *version,
new (iterator[i].impl) Iterator::Impl();
} else {
root = handle.rootForVersion(version[i]);
iterator[i].impl = new (malloc(sizeof(Iterator::Impl))) Iterator::Impl();
iterator[i].impl =
new (safe_malloc(sizeof(Iterator::Impl))) Iterator::Impl();
}
auto finger = search<std::memory_order_acquire>(key[i], root, version[i]);
@@ -1090,7 +1113,7 @@ void VersionedMap::firstGeq(const Key *key, const int64_t *version,
VersionedMap::Iterator VersionedMap::begin(int64_t version) const {
VersionedMap::Iterator result;
result.impl = new (malloc(sizeof(Iterator::Impl))) Iterator::Impl();
result.impl = new (safe_malloc(sizeof(Iterator::Impl))) Iterator::Impl();
result.impl->cmp = 1;
bool ignored;
@@ -1129,7 +1152,7 @@ VersionedMap::Iterator VersionedMap::begin(int64_t version) const {
VersionedMap::Iterator VersionedMap::end(int64_t version) const {
VersionedMap::Iterator result;
result.impl = new (malloc(sizeof(Iterator::Impl))) Iterator::Impl();
result.impl = new (safe_malloc(sizeof(Iterator::Impl))) Iterator::Impl();
result.impl->cmp = 1;
result.impl->map = impl;
result.impl->mutationIndex = 0;
@@ -1145,7 +1168,7 @@ void VersionedMap::setOldestVersion(int64_t oldestVersion) {
impl->setOldestVersion(oldestVersion);
}
// TODO implement getBytes
int64_t VersionedMap::getBytes() const { return impl->getBytes(); }
// ==================== END IMPLEMENTATION ====================
@@ -1190,6 +1213,18 @@ VersionedMap::Impl *cast(const VersionedMap &m) {
return result;
}
#if SHOW_MEMORY
struct __attribute__((visibility("default"))) PeakPrinter {
~PeakPrinter() {
printf("--- versioned_map ---\n");
printf("malloc bytes: %g\n", double(mallocBytes));
printf("Peak malloc bytes: %g\n", double(peakMallocBytes));
printf("mmap bytes: %g\n", double(mmapBytes));
printf("Peak mmap bytes: %g\n", double(peakMmapBytes));
}
} peakPrinter;
#endif
} // namespace weaselab
#ifdef ENABLE_MAIN
@@ -1200,6 +1235,7 @@ void breakpoint_me() {}
int main() {
{
weaselab::VersionedMap versionedMap{0};
printf("Bytes: %" PRId64 "\n", versionedMap.getBytes());
{
weaselab::VersionedMap::Mutation m[] = {
{(const uint8_t *)"a", nullptr, 1, 0, weaselab::VersionedMap::Set},
@@ -1211,6 +1247,7 @@ int main() {
};
versionedMap.addMutations(m, sizeof(m) / sizeof(m[0]), 1);
}
printf("Bytes: %" PRId64 "\n", versionedMap.getBytes());
{
weaselab::VersionedMap::Mutation m[] = {
{(const uint8_t *)"a", (const uint8_t *)"d", 1, 1,
@@ -1230,7 +1267,9 @@ int main() {
weaselab::VersionedMap::Key k = {(const uint8_t *)"a", 2};
weaselab::VersionedMap::Iterator iter;
versionedMap.firstGeq(&k, &v, &iter, 1);
printf("Bytes: %" PRId64 "\n", versionedMap.getBytes());
versionedMap.setOldestVersion(2);
printf("Bytes: %" PRId64 "\n", versionedMap.getBytes());
breakpoint_me();
for (auto end = versionedMap.end(v); iter != end; ++iter) {
const auto &m = *iter;

View File

@@ -170,7 +170,8 @@ struct VersionedMap {
/** The "past-the-end" iterator. */
Iterator end(int64_t version) const;
/** Returns the memory usage in bytes. */
/** Returns the memory usage in bytes. Does not include memory used by
* iterators. */
int64_t getBytes() const;
/** Map starts with no mutations, with `getOldestVersion()` == `getVersion()`