// versioned-map/VersionedMap.cpp
#include "VersionedMap.h"
#include "RootSet.h"
#include <algorithm>
#include <assert.h>
#include <atomic>
#include <errno.h>
#include <inttypes.h>
#include <iterator>
#include <optional>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <unordered_set>
#include <utility>
#include <xxhash.h>
#ifndef DEBUG_VERBOSE
#define DEBUG_VERBOSE 0
#endif
#if DEBUG_VERBOSE
// Used to toggle verbose debug output at runtime
bool debugVerboseEnabled = true;
#endif
static_assert(std::bidirectional_iterator<weaselab::VersionedMap::Iterator>);
void *mmapSafe(void *addr, size_t len, int prot, int flags, int fd,
off_t offset) {
void *result = mmap(addr, len, prot, flags, fd, offset);
if (result == MAP_FAILED) {
int err = errno; // GCOVR_EXCL_LINE
fprintf( // GCOVR_EXCL_LINE
stderr, // GCOVR_EXCL_LINE
"Error calling mmap(%p, %zu, %d, %d, %d, %jd): %d %s\n", // GCOVR_EXCL_LINE
addr, len, prot, flags, fd, (intmax_t)offset, err, // GCOVR_EXCL_LINE
strerror(err)); // GCOVR_EXCL_LINE
fflush(stderr); // GCOVR_EXCL_LINE
abort(); // GCOVR_EXCL_LINE
}
return result;
}
void mprotectSafe(void *p, size_t s, int prot) {
if (mprotect(p, s, prot) != 0) {
int err = errno; // GCOVR_EXCL_LINE
fprintf(stderr, // GCOVR_EXCL_LINE
"Error calling mprotect(%p, %zu, %d): %s\n", // GCOVR_EXCL_LINE
p, // GCOVR_EXCL_LINE
s, // GCOVR_EXCL_LINE
prot, // GCOVR_EXCL_LINE
strerror(err)); // GCOVR_EXCL_LINE
fflush(stderr); // GCOVR_EXCL_LINE
abort(); // GCOVR_EXCL_LINE
}
}
void munmapSafe(void *ptr, size_t size) {
if (munmap(ptr, size) != 0) {
int err = errno; // GCOVR_EXCL_LINE
fprintf(stderr, "Error calling munmap(%p, %zu): %s\n", // GCOVR_EXCL_LINE
ptr, // GCOVR_EXCL_LINE
size, // GCOVR_EXCL_LINE
strerror(err)); // GCOVR_EXCL_LINE
fflush(stderr); // GCOVR_EXCL_LINE
abort(); // GCOVR_EXCL_LINE
}
}
namespace weaselab {
// 96 is enough for an entire search path in a tree with a size that
// overflows int. See
// https://en.wikipedia.org/wiki/Random_binary_tree#The_longest_path
constexpr int kPathLengthUpperBound = 96;
struct Entry {
// If there is a point mutation at key, then pointVersion is its version.
// Otherwise it's negative.
int64_t pointVersion;
// If there is a range mutation ending at key, then rangeVersion is its
// version. Otherwise it's negative.
int64_t rangeVersion;
int keyLen;
// Negative if this key is cleared. Only meaningful if this is a point
// mutation.
int valLen;
mutable int refCount;
uint32_t priority;
// True if the entry is a point mutation. If false, this entry's key should be
// read through to the underlying data structure.
bool pointMutation() const { return pointVersion >= 0; }
// True if mutations in (pred, this) are cleared. If false, (pred, this)
// should be read through to the underlying data structure.
bool clearTo() const { return rangeVersion >= 0; }
// There's an extra zero byte past the end of getKey, used for
// reconstructing logical mutations without copies.
const uint8_t *getKey() const { return (const uint8_t *)(this + 1); }
const uint8_t *getVal() const {
return (const uint8_t *)(this + 1) + 1 + keyLen;
}
Entry *addref() const {
++refCount;
return (Entry *)this;
}
void delref() const {
if (--refCount == 0) {
free((void *)this);
}
}
static Entry *make(int64_t pointVersion, int64_t rangeVersion,
const uint8_t *key, int keyLen, const uint8_t *val,
int valLen) {
auto e = (Entry *)malloc(sizeof(Entry) + keyLen + 1 + std::max(valLen, 0));
e->pointVersion = pointVersion;
e->rangeVersion = rangeVersion;
e->keyLen = keyLen;
e->valLen = valLen;
e->refCount = 1;
e->priority = XXH3_64bits(key, keyLen);
if (keyLen > 0) {
memcpy((uint8_t *)e->getKey(), key, keyLen);
}
((uint8_t *)e->getKey())[keyLen] = 0;
if (valLen > 0) {
memcpy((uint8_t *)e->getVal(), val, valLen);
}
return e;
}
};
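// A treap node. `entry` owns the refcounted key/value payload, which is shared
// between copies of a node. pointer[0] and pointer[1] are the left and right
// children. Each node gets a single lazy in-place update: pointer[2] holds the
// replacement child, replacedPointer records which side it replaces, and
// updateVersion is the version at which the replacement becomes visible (see
// Impl::child). `updated` is the atomic flag readers check before consulting
// pointer[2]. While a node sits on the free list, updateVersion's storage is
// reused as the index of the next free node.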
struct Node {
union {
int64_t updateVersion;
uint32_t nextFree;
};
Entry *entry;
uint32_t pointer[3];
bool replacedPointer;
std::atomic<bool> updated;
};
// Limit mmap to 32 GiB so valgrind doesn't complain.
// https://bugs.kde.org/show_bug.cgi?id=229500
constexpr size_t kMapSize = size_t(32) * (1 << 30);
const size_t kPageSize = sysconf(_SC_PAGESIZE);
const uint32_t kNodesPerPage = kPageSize / sizeof(Node);
const uint32_t kMinAddressable = kNodesPerPage;
constexpr uint32_t kUpsizeBytes = 1 << 20;
constexpr uint32_t kUpsizeNodes = kUpsizeBytes / sizeof(Node);
static_assert(kUpsizeNodes * sizeof(Node) == kUpsizeBytes);
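// Bit set over node indices, used by MemManager::gc to mark reachable nodes.
// It tracks the largest bit ever set so committed memory can be shrunk from
// the right after marking.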
struct BitSet {
// calloc so the words start zeroed; test() and set() read them before any
// bit has been written.
explicit BitSet(uint32_t size)
: words((uint64_t *)calloc(size / 64 + 1, sizeof(uint64_t))) {}
bool test(uint32_t i) const {
return words[i >> 6] & (uint64_t(1) << (i & 63));
}
// Returns the former value of the bit
bool set(uint32_t i) {
const auto prev = words[i >> 6];
const auto mask = uint64_t(1) << (i & 63);
words[i >> 6] |= mask;
max_ = std::max(i, max_);
return prev & mask;
}
// Returns the largest set bit, or 0 if the set is empty
uint32_t max() const { return max_; }
template <class F>
void iterateAbsentApproxBackwards(F f, uint32_t begin, uint32_t end) const {
// TODO can this be improved? We can do something with a word at a time
// instead of a bit at a time. The first attempt at doing so benchmarked as
// slower.
assert(begin != 0);
for (uint32_t i = end - 1; i >= begin; --i) {
if (!test(i)) {
f(i);
}
}
}
~BitSet() { free(words); }
private:
uint32_t max_ = 0;
uint64_t *const words;
};
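// Arena allocator for Nodes. Reserves kMapSize of virtual address space up
// front (PROT_NONE) and commits it in kUpsizeBytes chunks as nodes are
// allocated. Node index 0 is never handed out and serves as the null pointer;
// the first page is left uncommitted so valid indices start at kMinAddressable.
// Freed nodes are threaded into a free list via Node::nextFree.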
struct MemManager {
MemManager()
: base((Node *)mmapSafe(nullptr, kMapSize, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)) {
if (kPageSize % sizeof(Node) != 0) {
fprintf(stderr, // GCOVR_EXCL_LINE
"kPageSize not a multiple of Node size\n"); // GCOVR_EXCL_LINE
abort(); // GCOVR_EXCL_LINE
}
if (kUpsizeBytes % kPageSize != 0) {
fprintf(stderr, // GCOVR_EXCL_LINE
"kUpsizeBytes not a multiple of kPageSize\n"); // GCOVR_EXCL_LINE
abort(); // GCOVR_EXCL_LINE
}
}
~MemManager() {
gc(nullptr, 0, 0);
munmapSafe(base, kMapSize);
}
Node *const base;
uint32_t allocate() {
if (freeList != 0) {
uint32_t result = freeList;
freeList = base[result].nextFree;
assert(base[result].entry == nullptr);
return result;
}
if (next == firstUnaddressable) {
mprotectSafe(base + firstUnaddressable, kUpsizeBytes,
PROT_READ | PROT_WRITE);
firstUnaddressable += kUpsizeNodes;
if (firstUnaddressable > kMapSize / sizeof(Node)) {
fprintf( // GCOVR_EXCL_LINE
stderr, // GCOVR_EXCL_LINE
"Out of memory: firstUnaddressable > kMapSize / " // GCOVR_EXCL_LINE
"sizeof(Node)\n"); // GCOVR_EXCL_LINE
abort(); // GCOVR_EXCL_LINE
}
}
return next++;
}
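// Mark-and-sweep over the node arena. `roots` holds one root per retained
// version. For an updated node, the new child (pointer[2]) and the untouched
// child are always traversed, while the replaced child is only visible to
// versions older than updateVersion and is therefore traversed only while
// oldestVersion < updateVersion. After marking, committed pages past the
// highest reachable node are returned to PROT_NONE and every unreachable node
// goes onto the free list.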
void gc(const uint32_t *roots, int numRoots, int64_t oldestVersion) {
// Calculate reachable set
BitSet reachable{next};
// Each node has at most 3 children and nodes along the search path aren't
// in the stack, so we need 2 * kPathLengthUpperBound
uint32_t stack[2 * kPathLengthUpperBound];
int stackIndex = 0;
auto tryPush = [&](uint32_t p) {
if (!reachable.set(p)) {
assert(stackIndex < sizeof(stack) / sizeof(stack[0]));
stack[stackIndex++] = p;
}
};
for (int i = 0; i < numRoots; ++i) {
if (roots[i] == 0) {
continue;
}
tryPush(roots[i]);
while (stackIndex > 0) {
uint32_t p = stack[--stackIndex];
auto &node = base[p];
if (node.updated.load(std::memory_order_relaxed)) {
if (node.pointer[!node.replacedPointer] != 0) {
tryPush(node.pointer[!node.replacedPointer]);
}
if (oldestVersion < node.updateVersion) {
if (node.pointer[node.replacedPointer] != 0) {
tryPush(node.pointer[node.replacedPointer]);
}
}
tryPush(node.pointer[2]);
} else {
if (node.pointer[0] != 0) {
tryPush(node.pointer[0]);
}
if (node.pointer[1] != 0) {
tryPush(node.pointer[1]);
}
}
}
}
// Reclaim memory on the right side
uint32_t max = reachable.max();
if (max == 0) {
max = kMinAddressable - 1;
}
assert(max < next);
uint32_t newFirstUnaddressable = (max / kNodesPerPage + 1) * kNodesPerPage;
if (newFirstUnaddressable < firstUnaddressable) {
for (uint32_t i = newFirstUnaddressable; i < firstUnaddressable; ++i) {
if (base[i].entry != nullptr) {
#if DEBUG_VERBOSE
if (debugVerboseEnabled) {
printf("Collecting %u while shrinking right\n", i);
}
#endif
base[i].entry->delref();
}
}
mprotectSafe(base + newFirstUnaddressable,
(firstUnaddressable - newFirstUnaddressable) * sizeof(Node),
PROT_NONE);
firstUnaddressable = newFirstUnaddressable;
}
next = max + 1;
// Rebuild free list and delref entries
freeList = 0;
reachable.iterateAbsentApproxBackwards(
[&](uint32_t i) {
if (base[i].entry != nullptr) {
#if DEBUG_VERBOSE
if (debugVerboseEnabled) {
printf("Collecting %u while building free list\n", i);
}
#endif
base[i].entry->delref();
base[i].entry = nullptr;
}
base[i].nextFree = freeList;
freeList = i;
},
kMinAddressable, next);
}
private:
uint32_t next = kMinAddressable;
uint32_t firstUnaddressable = kMinAddressable;
uint32_t freeList = 0;
};
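// Compares a search key with the key stored in a node's entry: memcmp over the
// common prefix, with ties broken by length.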
auto operator<=>(const VersionedMap::Key &lhs, const Node &rhs) {
int cl = std::min(lhs.len, rhs.entry->keyLen);
if (cl > 0) {
int c = memcmp(lhs.p, rhs.entry->getKey(), cl);
if (c != 0) {
return c <=> 0;
}
}
return lhs.len <=> rhs.entry->keyLen;
}
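// A Finger is the search path from the root down to a node, together with the
// direction taken at each step (false = left, true = right). The deepest node
// on the path is the current position; iterators and in-order moves
// (Impl::move) work by walking this path.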
struct Finger {
void push(uint32_t node, bool dir) {
searchPath[searchPathSize_] = node;
direction[searchPathSize_] = dir;
++searchPathSize_;
}
void pop() {
assert(searchPathSize_ > 0);
--searchPathSize_;
}
uint32_t backNode() const {
assert(searchPathSize_ > 0);
return searchPath[searchPathSize_ - 1];
}
uint32_t &backNodeRef() {
assert(searchPathSize_ > 0);
return searchPath[searchPathSize_ - 1];
}
bool backDirection() const {
assert(searchPathSize_ > 0);
return direction[searchPathSize_ - 1];
}
uint32_t searchPathSize() const { return searchPathSize_; }
void setSearchPathSizeUnsafe(int size) { searchPathSize_ = size; }
Finger() : searchPathSize_(0) {}
Finger(const Finger &other) {
#ifndef NDEBUG
memset(searchPath, 0, sizeof(searchPath));
memset(direction, 0, sizeof(direction));
#endif
memcpy(searchPath, other.searchPath,
other.searchPathSize_ * sizeof(searchPath[0]));
memcpy(direction, other.direction,
other.searchPathSize_ * sizeof(direction[0]));
searchPathSize_ = other.searchPathSize_;
}
Finger &operator=(const Finger &other) {
#ifndef NDEBUG
memset(searchPath, 0, sizeof(searchPath));
memset(direction, 0, sizeof(direction));
#endif
memcpy(searchPath, other.searchPath,
other.searchPathSize_ * sizeof(searchPath[0]));
memcpy(direction, other.direction,
other.searchPathSize_ * sizeof(direction[0]));
searchPathSize_ = other.searchPathSize_;
return *this;
}
private:
uint32_t searchPath[kPathLengthUpperBound];
bool direction[kPathLengthUpperBound];
int searchPathSize_;
};
struct VersionedMap::Impl {
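// Advance `finger` one position in key order as of version `at`:
// direction == true moves to the successor, false to the predecessor. If the
// current node has a child in that direction, step into it and then descend
// all the way the opposite way; otherwise climb the search path until we leave
// a subtree on the opposite side. Leaves the finger with an empty search path
// if no such entry exists.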
template <std::memory_order kOrder>
void move(Finger &finger, int64_t at, bool direction) {
uint32_t c;
if (finger.backNode() != 0 &&
(c = child<kOrder>(finger.backNode(), direction, at)) != 0) {
finger.push(c, direction);
while ((c = child<kOrder>(finger.backNode(), !direction, at)) != 0) {
finger.push(c, !direction);
}
} else {
while (finger.searchPathSize() > 1 && finger.backDirection() == direction) {
finger.pop();
}
finger.pop();
}
}
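// Resolve the child of `node` on side `which` as seen at version `at`: if the
// node's single in-place update applies (installed at or before `at`, on this
// side), return pointer[2], otherwise the original pointer[which].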
template <std::memory_order kOrder>
uint32_t child(uint32_t node, bool which, int64_t at) {
static_assert(kOrder == std::memory_order_acquire ||
kOrder == std::memory_order_relaxed);
auto &n = mm.base[node];
uint32_t result;
if (n.updated.load(kOrder) && n.updateVersion <= at &&
which == n.replacedPointer) {
result = n.pointer[2];
} else {
result = n.pointer[which];
}
assert(result == 0 || result >= kMinAddressable);
return result;
}
template <std::memory_order kOrder>
uint32_t left(uint32_t node, bool which, int64_t at) {
return child<kOrder>(node, false, at);
}
template <std::memory_order kOrder>
uint32_t right(uint32_t node, bool which, int64_t at) {
return child<kOrder>(node, true, at);
}
// Returns the node that results from setting `which` to `child` on `node`
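// Nodes created, copied, or already updated at `version` are modified in place
// where possible, since no reader at an older version can observe the change.
// An older node gets one lazy in-place update: the new child goes in
// pointer[2], replacedPointer/updateVersion describe it, and the release store
// to `updated` publishes it. Once that slot is spent (for the other side, or
// at an older version), the node is path-copied instead.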
uint32_t update(uint32_t node, bool which, uint32_t child, int64_t version) {
assert(node == 0 || node >= kMinAddressable);
assert(child == 0 || child >= kMinAddressable);
if (this->child<std::memory_order_relaxed>(node, which, version) == child) {
return node;
}
auto &n = mm.base[node];
const bool updated = n.updated.load(std::memory_order_relaxed);
auto doCopy = [&]() {
uint32_t copy = mm.allocate();
auto &c = mm.base[copy];
c.entry = n.entry->addref();
c.pointer[which] = child;
c.pointer[!which] = n.pointer[!which];
c.updated.store(false, std::memory_order_relaxed);
c.updateVersion = version;
assert(copy == 0 || copy >= kMinAddressable);
return copy;
};
if (n.updateVersion == version) {
// These writes aren't data races because concurrent readers only read
// versions older than `version`
if (updated && n.replacedPointer != which) {
// We can't update n.replacedPointer without introducing a data race
// (unless we packed it into the atomic?), so we copy. pointer[2] becomes
// unreachable, but we need to tell the garbage collector, so clear it.
n.pointer[2] = 0;
return doCopy();
} else if (updated) {
n.pointer[2] = child;
} else {
n.pointer[which] = child;
}
assert(node == 0 || node >= kMinAddressable);
return node;
}
if (updated) {
// We already used this node's in-place update
return doCopy();
} else {
n.updateVersion = version;
n.pointer[2] = child;
n.replacedPointer = which;
n.updated.store(true, std::memory_order_release); // Must be last
assert(node == 0 || node >= kMinAddressable);
return node;
}
}
void rotate(uint32_t &n, int64_t at, bool right) {
auto l = child<std::memory_order_relaxed>(n, !right, at);
n = update(
l, right,
update(n, !right, child<std::memory_order_relaxed>(l, right, at), at),
at);
}
struct Val {
const uint8_t *p;
int len;
};
Finger search(Key key, int64_t at) {
Finger finger;
bool ignored;
finger.push(latestRoot, ignored);
// Initialize finger to the search path of `key`
for (;;) {
auto n = finger.backNode();
if (n == 0) {
break;
}
auto c = key <=> mm.base[n];
if (c == 0) {
// No duplicates
break;
}
finger.push(child<std::memory_order_relaxed>(n, c > 0, latestVersion),
c > 0);
}
return finger;
}
// If `val` is set, then this is a point mutation at `latestVersion`.
// Otherwise it's the end of a range mutation at `latestVersion`.
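// Treap insert: walk down to the existing node (or the leaf position where
// `key` belongs), build a new node carrying the combined point/range versions,
// then walk back up the search path, path-copying parents via update() and
// rotating the new node upward while its priority beats its parent's.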
void insert(Key key, std::optional<Val> val) {
Finger finger;
bool ignored;
finger.push(latestRoot, ignored);
bool inserted;
// Initialize finger to the search path of `key`
for (;;) {
auto n = finger.backNode();
if (n == 0) {
inserted = true;
break;
}
auto c = key <=> mm.base[n];
if (c == 0) {
// No duplicates
inserted = false;
break;
}
finger.push(child<std::memory_order_relaxed>(n, c > 0, latestVersion),
c > 0);
}
int64_t pointVersion, rangeVersion;
if (val.has_value()) {
pointVersion = latestVersion;
if (inserted) {
auto copy = finger;
move<std::memory_order_relaxed>(copy, latestVersion, true);
if (copy.searchPathSize() == 0) {
rangeVersion = -1;
} else {
rangeVersion = mm.base[copy.backNode()].entry->rangeVersion;
}
} else {
auto *entry = mm.base[finger.backNode()].entry;
rangeVersion = entry->rangeVersion;
}
} else {
rangeVersion = latestVersion;
if (inserted) {
val = {nullptr, -1};
pointVersion = -1;
} else {
auto *entry = mm.base[finger.backNode()].entry;
val = {entry->getVal(), entry->valLen};
pointVersion = entry->pointVersion;
}
}
// Prepare new node
uint32_t node =
newNode(pointVersion, rangeVersion, key.p, key.len, val->p, val->len);
if (!inserted) {
auto &n = mm.base[node];
n.pointer[0] = child<std::memory_order_relaxed>(finger.backNode(), false,
latestVersion);
n.pointer[1] = child<std::memory_order_relaxed>(finger.backNode(), true,
latestVersion);
}
// Rotate and propagate up the search path
for (;;) {
if (finger.searchPathSize() == 1) {
// Made it to the root
latestRoot = node;
break;
}
const bool direction = finger.backDirection();
finger.pop();
auto parent = finger.backNode();
parent = update(parent, direction, node, latestVersion);
if (inserted &&
mm.base[node].entry->priority > mm.base[parent].entry->priority) {
rotate(parent, latestVersion, !direction);
} else {
if (parent == finger.backNode()) {
break;
}
}
node = parent;
}
}
// Removes the entry that `finger` points to from the tree, and leaves `finger`
// pointing to the next entry (or with an empty search path if there is none).
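// Standard treap deletion: rotate the doomed node down toward its
// higher-priority child until it has at most one child, splice it out, then
// path-copy its ancestors back up to the root.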
void remove(Finger &finger) {
// True if, after the removed entry is rotated down and spliced out, `finger`
// points into the subtree of entries greater than the one being removed
bool greaterThan;
// Rotate down until we can remove the entry
for (;;) {
auto &node = finger.backNodeRef();
const auto l =
child<std::memory_order_relaxed>(node, false, latestVersion);
const auto r =
child<std::memory_order_relaxed>(node, true, latestVersion);
if (l == 0) {
node = r;
greaterThan = true;
break;
} else if (r == 0) {
node = l;
greaterThan = false;
break;
} else {
const bool direction =
mm.base[l].entry->priority > mm.base[r].entry->priority;
rotate(node, latestVersion, direction);
assert(node != 0);
finger.push(
child<std::memory_order_relaxed>(node, direction, latestVersion),
direction);
}
}
// Propagate up the search path, all the way to the root, since there may be
// more updates to apply even if an update doesn't change a node pointer
auto node = finger.backNode();
const auto oldSize = finger.searchPathSize();
for (;;) {
if (finger.searchPathSize() == 1) {
// Made it to the root
latestRoot = node;
break;
}
const bool direction = finger.backDirection();
finger.pop();
auto &parent = finger.backNodeRef();
parent = update(parent, direction, node, latestVersion);
node = parent;
}
finger.setSearchPathSizeUnsafe(oldSize);
if (greaterThan) {
uint32_t c;
while ((c = child<std::memory_order_relaxed>(finger.backNode(), false,
latestVersion)) != 0) {
finger.push(c, false);
}
} else {
move<std::memory_order_relaxed>(finger, latestVersion, true);
}
if (finger.backNode() == 0) {
finger.pop();
assert(finger.searchPathSize() == 0);
}
}
uint32_t newNode(int64_t version, int64_t rangeVersion, const uint8_t *key,
int keyLen, const uint8_t *val, int valLen) {
auto result = mm.allocate();
auto &node = mm.base[result];
node.updateVersion = version;
node.pointer[0] = 0;
node.pointer[1] = 0;
node.updated.store(false, std::memory_order_relaxed);
node.entry = Entry::make(version, rangeVersion, key, keyLen, val, valLen);
return result;
}
void setOldestVersion(int64_t oldestVersion) {
roots.setOldestVersion(oldestVersion);
mm.gc(roots.roots(), roots.rootCount(), oldestVersion);
}
void printInOrder(int64_t version);
void printInOrderHelper(int64_t version, uint32_t node, int depth);
void addMutations(const Mutation *mutations, int numMutations,
int64_t version) {
// TODO scan to remove mutations older than oldestVersion
assert(latestVersion < version);
latestVersion = version;
latestRoot = roots.roots()[roots.rootCount() - 1];
// TODO Improve ILP?
for (int i = 0; i < numMutations; ++i) {
const auto &m = mutations[i];
switch (m.type) {
case Set: {
insert({m.param1, m.param1Len}, {{m.param2, m.param2Len}});
} break;
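// A clear is recorded as a point clear at param1; for a range clear
// (param2Len > 0), every existing entry in (param1, param2) is removed and
// a range-mutation end marker is inserted at param2.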
case Clear: {
insert({m.param1, m.param1Len}, {{nullptr, -1}});
if (m.param2Len > 0) {
auto iter = search({m.param1, m.param1Len}, latestVersion);
move<std::memory_order_relaxed>(iter, latestVersion, true);
while (iter.searchPathSize() > 0 &&
mm.base[iter.backNode()] < Key{m.param2, m.param2Len}) {
remove(iter);
}
insert({m.param2, m.param2Len}, {});
}
} break;
default: // GCOVR_EXCL_LINE
__builtin_unreachable(); // GCOVR_EXCL_LINE
}
}
roots.add(latestRoot, latestVersion);
}
MemManager mm;
RootSet roots;
// Only meaningful within the callstack of `addMutations`
uint32_t latestRoot;
int64_t latestVersion = 0;
};
VersionedMap::VersionedMap(int64_t version)
: impl(new(malloc(sizeof(Impl))) Impl()) {
impl->latestVersion = version;
}
VersionedMap::~VersionedMap() {
if (impl != nullptr) {
impl->~Impl();
free(impl);
}
}
VersionedMap::VersionedMap(VersionedMap &&other) noexcept {
impl = std::exchange(other.impl, nullptr);
}
VersionedMap &VersionedMap::operator=(VersionedMap &&other) noexcept {
if (impl != nullptr) {
impl->~Impl();
free(impl);
}
impl = std::exchange(other.impl, nullptr);
return *this;
}
void VersionedMap::addMutations(const Mutation *mutations, int numMutations,
int64_t version) {
impl->addMutations(mutations, numMutations, version);
}
struct VersionedMap::Iterator::Impl {
Finger finger;
int64_t version;
VersionedMap::Impl *map;
int cmp;
// True if this entry is both a point mutation and the end of a range
// mutation, and we're materializing the range mutation rather than the
// point mutation.
bool materializeClearEndingHere = false;
};
VersionedMap::Iterator::~Iterator() {
if (impl != nullptr) {
impl->~Impl();
free(impl);
}
}
VersionedMap::Iterator::Iterator(const Iterator &other)
: impl(new(malloc(sizeof(Impl))) Impl(*other.impl)) {}
VersionedMap::Iterator &
VersionedMap::Iterator::operator=(const Iterator &other) {
if (impl != nullptr) {
impl->~Impl();
free(impl);
}
impl = new (malloc(sizeof(Impl))) Impl(*other.impl);
return *this;
}
VersionedMap::Iterator::Iterator(Iterator &&other) noexcept
: impl(std::exchange(other.impl, nullptr)) {}
VersionedMap::Iterator &
VersionedMap::Iterator::operator=(Iterator &&other) noexcept {
if (impl != nullptr) {
impl->~Impl();
free(impl);
}
impl = std::exchange(other.impl, nullptr);
return *this;
}
VersionedMap::Mutation VersionedMap::Iterator::operator*() const {
assert(impl->finger.backNode() != 0);
assert(impl->finger.searchPathSize() != 0);
const auto &entry = *impl->map->mm.base[impl->finger.backNode()].entry;
if (impl->materializeClearEndingHere) {
assert(entry.pointMutation() && entry.clearTo());
auto prev = *this;
--prev;
const auto &prevEntry =
*prev.impl->map->mm.base[prev.impl->finger.backNode()].entry;
return {prevEntry.getKey(), entry.getKey(), prevEntry.keyLen + 1,
entry.keyLen, Clear};
}
if (entry.valLen >= 0) {
return {entry.getKey(), entry.getVal(), entry.keyLen, entry.valLen, Set};
} else {
return {entry.getKey(), nullptr, entry.keyLen, -1, Clear};
}
}
VersionedMap::Iterator &VersionedMap::Iterator::operator++() {
const auto &entry = *impl->map->mm.base[impl->finger.backNode()].entry;
if (impl->materializeClearEndingHere) {
assert(entry.pointMutation() && entry.clearTo());
impl->materializeClearEndingHere = false;
return *this;
}
impl->map->move<std::memory_order_acquire>(impl->finger, impl->version, true);
impl->materializeClearEndingHere =
impl->finger.searchPathSize() > 0 &&
impl->map->mm.base[impl->finger.backNode()].entry->pointMutation() &&
impl->map->mm.base[impl->finger.backNode()].entry->clearTo();
return *this;
}
VersionedMap::Iterator VersionedMap::Iterator::operator++(int) {
auto result = *this;
++*this;
return result;
}
VersionedMap::Iterator &VersionedMap::Iterator::operator--() {
const auto &entry = *impl->map->mm.base[impl->finger.backNode()].entry;
if (entry.pointMutation() && entry.clearTo() &&
!impl->materializeClearEndingHere) {
impl->materializeClearEndingHere = true;
return *this;
}
impl->map->move<std::memory_order_acquire>(impl->finger, impl->version,
false);
impl->materializeClearEndingHere = false;
return *this;
}
VersionedMap::Iterator VersionedMap::Iterator::operator--(int) {
auto result = *this;
--*this;
return result;
}
bool VersionedMap::Iterator::operator==(const Iterator &other) const {
assert(impl->map == other.impl->map);
assert(impl->version == other.impl->version);
if (impl->finger.searchPathSize() == 0 ||
other.impl->finger.searchPathSize() == 0) {
return impl->finger.searchPathSize() == other.impl->finger.searchPathSize();
}
return impl->finger.backNode() == other.impl->finger.backNode() &&
impl->materializeClearEndingHere ==
other.impl->materializeClearEndingHere;
}
bool VersionedMap::Iterator::operator!=(const Iterator &other) const {
return !(*this == other);
}
int VersionedMap::Iterator::cmp() const { return impl->cmp; }
VersionedMap::Iterator VersionedMap::begin(int64_t version) const {
VersionedMap::Iterator result;
result.impl = new (malloc(sizeof(Iterator::Impl))) Iterator::Impl();
result.impl->cmp = 1;
bool ignored;
result.impl->finger.push(
impl->roots.getThreadSafeHandle().rootForVersion(version), ignored);
if (result.impl->finger.backNode() == 0) {
result.impl->finger.pop();
} else {
uint32_t c;
while ((c = impl->child<std::memory_order_relaxed>(
result.impl->finger.backNode(), false, version)) != 0) {
result.impl->finger.push(c, false);
}
}
result.impl->map = impl;
result.impl->materializeClearEndingHere = false;
result.impl->version = version;
return result;
}
VersionedMap::Iterator VersionedMap::end(int64_t version) const {
VersionedMap::Iterator result;
result.impl = new (malloc(sizeof(Iterator::Impl))) Iterator::Impl();
result.impl->cmp = 1;
result.impl->map = impl;
result.impl->materializeClearEndingHere = false;
result.impl->version = version;
return result;
}
// ==================== END IMPLEMENTATION ====================
// GCOVR_EXCL_START
void VersionedMap::Impl::printInOrder(int64_t version) {
printInOrderHelper(version,
roots.getThreadSafeHandle().rootForVersion(version), 0);
}
void VersionedMap::Impl::printInOrderHelper(int64_t version, uint32_t node,
int depth) {
if (node == 0) {
return;
}
printInOrderHelper(version,
child<std::memory_order_relaxed>(node, false, version),
depth + 1);
for (int i = 0; i < depth; ++i) {
printf(" ");
}
printf("%.*s", mm.base[node].entry->keyLen, mm.base[node].entry->getKey());
if (mm.base[node].entry->valLen >= 0) {
printf(" -> '%.*s' @ %" PRId64, mm.base[node].entry->valLen,
mm.base[node].entry->getVal(), mm.base[node].entry->pointVersion);
} else {
printf(" <cleared @ %" PRId64 ">", mm.base[node].entry->pointVersion);
}
if (mm.base[node].entry->clearTo()) {
printf(" <clearTo @ %" PRId64 ">", mm.base[node].entry->rangeVersion);
}
printf("\n");
VersionedMap::Impl::printInOrderHelper(
version, child<std::memory_order_relaxed>(node, true, version),
depth + 1);
}
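// Test/debug helper: recovers the private Impl pointer by copying the first
// pointer-sized bytes of the VersionedMap. Relies on `impl` being the first
// member.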
VersionedMap::Impl *cast(const VersionedMap &m) {
VersionedMap::Impl *result;
memcpy(&result, &m, sizeof(void *));
return result;
}
} // namespace weaselab
#ifdef ENABLE_MAIN
#include <nanobench.h>
int main() {
{
weaselab::VersionedMap versionedMap{0};
{
weaselab::VersionedMap::Mutation m[] = {
{(const uint8_t *)"a", nullptr, 1, 0, weaselab::VersionedMap::Set},
{(const uint8_t *)"b", nullptr, 1, 0, weaselab::VersionedMap::Set},
{(const uint8_t *)"c", nullptr, 1, 0, weaselab::VersionedMap::Set},
{(const uint8_t *)"d", nullptr, 1, 0, weaselab::VersionedMap::Set},
{(const uint8_t *)"e", nullptr, 1, 0, weaselab::VersionedMap::Set},
{(const uint8_t *)"f", nullptr, 1, 0, weaselab::VersionedMap::Set},
};
versionedMap.addMutations(m, sizeof(m) / sizeof(m[0]), 1);
}
{
weaselab::VersionedMap::Mutation m[] = {
{(const uint8_t *)"a", (const uint8_t *)"d", 1, 1,
weaselab::VersionedMap::Clear},
};
versionedMap.addMutations(m, sizeof(m) / sizeof(m[0]), 2);
}
{
weaselab::VersionedMap::Mutation m[] = {
{(const uint8_t *)"b", (const uint8_t *)"", 1, 0,
weaselab::VersionedMap::Clear},
};
versionedMap.addMutations(m, sizeof(m) / sizeof(m[0]), 3);
}
const int64_t v = 3;
cast(versionedMap)->printInOrder(v);
for (auto iter = versionedMap.begin(v), end = versionedMap.end(v);
iter != end; ++iter) {
const auto &m = *iter;
switch (m.type) {
case weaselab::VersionedMap::Set:
printf("set ");
for (int i = 0; i < m.param1Len; ++i) {
printf("x%02x", m.param1[i]);
}
printf(" -> ");
for (int i = 0; i < m.param2Len; ++i) {
printf("x%02x", m.param2[i]);
}
printf("\n");
break;
case weaselab::VersionedMap::Clear:
printf("clear [");
for (int i = 0; i < m.param1Len; ++i) {
printf("x%02x", m.param1[i]);
}
printf(", ");
for (int i = 0; i < m.param2Len; ++i) {
printf("x%02x", m.param2[i]);
}
printf(")\n");
break;
default: // GCOVR_EXCL_LINE
__builtin_unreachable(); // GCOVR_EXCL_LINE
}
}
}
return 0;
}
#endif
// GCOVR_EXCL_STOP