Compare commits
6 Commits
b8f6a8edf2...71e117965e
| SHA1 |
|---|
| 71e117965e |
| 471b276947 |
| b721bc80a9 |
| 5e4eab55fb |
| 1dcb380c73 |
| 87d650ff00 |
ConflictSet.cpp (368 changed lines)
@@ -261,8 +261,8 @@ struct Node256 : Node {
 
 // Bound memory usage following the analysis in the ART paper
 
-constexpr int kBytesPerKey = 86;
-constexpr int kMinChildrenNode4 = 2;
+constexpr int kBytesPerKey = 121;
+constexpr int kMinChildrenNode4 = 1;
 constexpr int kMinChildrenNode16 = 5;
 constexpr int kMinChildrenNode48 = 17;
 constexpr int kMinChildrenNode256 = 49;
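The minimum-occupancy constants are what the downsizing logic added later in this diff checks against, and each one lines up with the capacity of the next-smaller node type, so an under-full node can always be shrunk. A minimal sketch of that relationship, assuming the usual ART capacities of 4, 16, and 48 children (the capacities are an assumption, not shown in this hunk):

```cpp
// The constants from the hunk above; 4/16/48 are the assumed capacities of the
// next-smaller node types.
constexpr int kMinChildrenNode16 = 5;
constexpr int kMinChildrenNode48 = 17;
constexpr int kMinChildrenNode256 = 49;

// A node that falls below its minimum occupancy always fits in the
// next-smaller node type, so it can be shrunk rather than left sparse.
static_assert(kMinChildrenNode16 <= 4 + 1);
static_assert(kMinChildrenNode48 <= 16 + 1);
static_assert(kMinChildrenNode256 <= 48 + 1);
```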
@@ -462,6 +462,8 @@ int64_t getChildMaxVersion(Node *self, uint8_t index) {
 // Precondition - an entry for index must exist in the node
 int64_t &maxVersion(Node *n, ConflictSet::Impl *);
 
+Node *&getInTree(Node *n, ConflictSet::Impl *);
+
 Node *getChild(Node *self, uint8_t index) {
   if (self->type <= Type::Node16) {
     auto *self16 = static_cast<Node16 *>(self);
@@ -549,6 +551,12 @@ int getChildGeq(Node *self, int child) {
   return -1;
 }
 
+void setChildrenParents(Node4 *n) {
+  for (int i = 0; i < n->numChildren; ++i) {
+    n->children[i].child->parent = n;
+  }
+}
+
 void setChildrenParents(Node16 *n) {
   for (int i = 0; i < n->numChildren; ++i) {
     n->children[i].child->parent = n;
@@ -703,61 +711,6 @@ Node *&getOrCreateChild(Node *&self, uint8_t index,
   }
 }
 
-// Precondition - an entry for index must exist in the node
-void eraseChild(Node *self, uint8_t index, NodeAllocators *allocators) {
-  auto *child = getChildExists(self, index);
-  switch (child->type) {
-  case Type::Node0:
-    allocators->node0.release((Node0 *)child);
-    break;
-  case Type::Node4:
-    allocators->node4.release((Node4 *)child);
-    break;
-  case Type::Node16:
-    allocators->node16.release((Node16 *)child);
-    break;
-  case Type::Node48:
-    allocators->node48.release((Node48 *)child);
-    break;
-  case Type::Node256:
-    allocators->node256.release((Node256 *)child);
-    break;
-  }
-
-  if (self->type <= Type::Node16) {
-    auto *self16 = static_cast<Node16 *>(self);
-    int nodeIndex = getNodeIndex(self16, index);
-    memmove(self16->index + nodeIndex, self16->index + nodeIndex + 1,
-            sizeof(self16->index[0]) * (self->numChildren - (nodeIndex + 1)));
-    memmove(self16->children + nodeIndex, self16->children + nodeIndex + 1,
-            sizeof(self16->children[0]) * // NOLINT
-                (self->numChildren - (nodeIndex + 1)));
-  } else if (self->type == Type::Node48) {
-    auto *self48 = static_cast<Node48 *>(self);
-    self48->bitSet.reset(index);
-    int8_t toRemoveChildrenIndex = std::exchange(self48->index[index], -1);
-    int8_t lastChildrenIndex = --self48->nextFree;
-    assert(toRemoveChildrenIndex >= 0);
-    assert(lastChildrenIndex >= 0);
-    if (toRemoveChildrenIndex != lastChildrenIndex) {
-      self48->children[toRemoveChildrenIndex] =
-          self48->children[lastChildrenIndex];
-      self48
-          ->index[self48->children[toRemoveChildrenIndex].child->parentsIndex] =
-          toRemoveChildrenIndex;
-    }
-  } else {
-    auto *self256 = static_cast<Node256 *>(self);
-    self256->bitSet.reset(index);
-    self256->children[index].child = nullptr;
-  }
-  --self->numChildren;
-  if (self->numChildren == 0 && !self->entryPresent &&
-      self->parent != nullptr) {
-    eraseChild(self->parent, self->parentsIndex, allocators);
-  }
-}
-
 Node *nextPhysical(Node *node) {
   int index = -1;
   for (;;) {
@@ -780,6 +733,203 @@ Node *nextLogical(Node *node) {
   return node;
 }
 
+// TODO fuse into erase child so we don't need to repeat branches on type
+void maybeDownsize(Node *self, NodeAllocators *allocators,
+                   ConflictSet::Impl *impl) {
+  switch (self->type) {
+  case Type::Node0:
+    __builtin_unreachable(); // GCOVR_EXCL_LINE
+  case Type::Node4: {
+    auto *self4 = (Node4 *)self;
+    if (self->numChildren == 0) {
+      auto *newSelf = allocators->node0.allocate(self->partialKeyLen);
+      memcpy((char *)newSelf + kNodeCopyBegin, (char *)self + kNodeCopyBegin,
+             kNodeCopySize);
+      memcpy(newSelf->partialKey(), self4->partialKey(), self->partialKeyLen);
+
+      getInTree(self, impl) = newSelf;
+      allocators->node4.release(self4);
+    } else if (self->numChildren == 1) {
+      if (!self->entryPresent) {
+        auto *child = self4->children[0].child;
+        int minCapacity = self4->partialKeyLen + 1 + child->partialKeyLen;
+
+        if (minCapacity > child->partialKeyCapacity) {
+          // TODO resize child? It seems to be quite challenging to implement,
+          // since callers would now have to account for erase invalidating
+          // not on the search path. We could lower kBytesPerKey by doing this
+          // though.
+          return;
+        }
+
+        // Merge partial key with child
+#if DEBUG_VERBOSE && !defined(NDEBUG)
+        fprintf(stderr, "Merge %s into %s\n",
+                getSearchPathPrintable(self).c_str(),
+                getSearchPathPrintable(child).c_str());
+#endif
+
+        int64_t childMaxVersion = maxVersion(child, impl);
+
+        // Construct new partial key for child
+        memmove(child->partialKey() + self4->partialKeyLen + 1,
+                child->partialKey(), child->partialKeyLen);
+        memcpy(child->partialKey(), self4->partialKey(), self->partialKeyLen);
+        child->partialKey()[self4->partialKeyLen] = self4->index[0];
+        child->partialKeyLen += 1 + self4->partialKeyLen;
+
+        child->parent = self->parent;
+        child->parentsIndex = self->parentsIndex;
+
+        // Max versions are stored in the parent, so we need to update it now
+        // that we have a new parent.
+        maxVersion(child, impl) = childMaxVersion;
+
+        getInTree(self, impl) = child;
+        allocators->node4.release(self4);
+      }
+    }
+  } break;
+  case Type::Node16:
+    if (self->numChildren < kMinChildrenNode16) {
+      auto *self16 = (Node16 *)self;
+      auto *newSelf = allocators->node4.allocate(self->partialKeyLen);
+      memcpy((char *)newSelf + kNodeCopyBegin, (char *)self + kNodeCopyBegin,
+             kNodeCopySize);
+      memcpy(newSelf->partialKey(), self16->partialKey(), self->partialKeyLen);
+      // TODO replace with memcpy?
+      for (int i = 0; i < 4; ++i) {
+        newSelf->index[i] = self16->index[i];
+        newSelf->children[i] = self16->children[i];
+      }
+      setChildrenParents(newSelf);
+      getInTree(self, impl) = newSelf;
+      allocators->node16.release(self16);
+    }
+    break;
+  case Type::Node48:
+    if (self->numChildren < kMinChildrenNode48) {
+      auto *self48 = (Node48 *)self;
+      auto *newSelf = allocators->node16.allocate(self->partialKeyLen);
+      memcpy((char *)newSelf + kNodeCopyBegin, (char *)self + kNodeCopyBegin,
+             kNodeCopySize);
+      memcpy(newSelf->partialKey(), self48->partialKey(), self->partialKeyLen);
+
+      int i = 0;
+      self48->bitSet.forEachInRange(
+          [&](int c) {
+            // Suppress a false positive -Waggressive-loop-optimizations warning
+            // in gcc. `assume` doesn't work for some reason.
+            if (!(i < 16)) {
+              __builtin_unreachable();
+            }
+            newSelf->index[i] = c;
+            newSelf->children[i] = self48->children[self48->index[c]];
+            ++i;
+          },
+          0, 256);
+
+      setChildrenParents(newSelf);
+      getInTree(self, impl) = newSelf;
+      allocators->node48.release(self48);
+    }
+    break;
+  case Type::Node256:
+    if (self->numChildren < kMinChildrenNode256) {
+      auto *self256 = (Node256 *)self;
+      auto *newSelf = allocators->node48.allocate(self->partialKeyLen);
+      memcpy((char *)newSelf + kNodeCopyBegin, (char *)self + kNodeCopyBegin,
+             kNodeCopySize);
+      memcpy(newSelf->partialKey(), self256->partialKey(), self->partialKeyLen);
+
+      newSelf->bitSet = self256->bitSet;
+      newSelf->bitSet.forEachInRange(
+          [&](int c) {
+            newSelf->index[c] = newSelf->nextFree;
+            newSelf->children[newSelf->nextFree] = self256->children[c];
+            ++newSelf->nextFree;
+          },
+          0, 256);
+
+      setChildrenParents(newSelf);
+      getInTree(self, impl) = newSelf;
+      allocators->node256.release(self256);
+    }
+    break;
+  }
+}
+
+// Precondition: self is not the root. May invalidate nodes along the search
+// path to self.
+Node *erase(Node *self, NodeAllocators *allocators, ConflictSet::Impl *impl) {
+  assert(self->parent != nullptr);
+
+  Node *parent = self->parent;
+  uint8_t parentsIndex = self->parentsIndex;
+
+  auto *result = nextLogical(self);
+  self->entryPresent = false;
+  if (self->numChildren != 0) {
+    return result;
+  }
+
+  switch (self->type) {
+  case Type::Node0:
+    allocators->node0.release((Node0 *)self);
+    break;
+  case Type::Node4:
+    allocators->node4.release((Node4 *)self);
+    break;
+  case Type::Node16:
+    allocators->node16.release((Node16 *)self);
+    break;
+  case Type::Node48:
+    allocators->node48.release((Node48 *)self);
+    break;
+  case Type::Node256:
+    allocators->node256.release((Node256 *)self);
+    break;
+  }
+
+  if (parent->type <= Type::Node16) {
+    auto *parent16 = static_cast<Node16 *>(parent);
+    int nodeIndex = getNodeIndex(parent16, parentsIndex);
+    assert(nodeIndex >= 0);
+    memmove(parent16->index + nodeIndex, parent16->index + nodeIndex + 1,
+            sizeof(parent16->index[0]) *
+                (parent->numChildren - (nodeIndex + 1)));
+    memmove(parent16->children + nodeIndex, parent16->children + nodeIndex + 1,
+            sizeof(parent16->children[0]) *
+                (parent->numChildren - (nodeIndex + 1)));
+  } else if (parent->type == Type::Node48) {
+    auto *parent48 = static_cast<Node48 *>(parent);
+    parent48->bitSet.reset(parentsIndex);
+    int8_t toRemoveChildrenIndex =
+        std::exchange(parent48->index[parentsIndex], -1);
+    int8_t lastChildrenIndex = --parent48->nextFree;
+    assert(toRemoveChildrenIndex >= 0);
+    assert(lastChildrenIndex >= 0);
+    if (toRemoveChildrenIndex != lastChildrenIndex) {
+      parent48->children[toRemoveChildrenIndex] =
+          parent48->children[lastChildrenIndex];
+      parent48->index[parent48->children[toRemoveChildrenIndex]
+                          .child->parentsIndex] = toRemoveChildrenIndex;
+    }
+  } else {
+    auto *parent256 = static_cast<Node256 *>(parent);
+    parent256->bitSet.reset(parentsIndex);
+    parent256->children[parentsIndex].child = nullptr;
+  }
+  --parent->numChildren;
+  if (parent->numChildren == 0 && !parent->entryPresent &&
+      parent->parent != nullptr) {
+    erase(parent, allocators, impl);
+  } else {
+    maybeDownsize(parent, allocators, impl);
+  }
+  return result;
+}
+
 struct Iterator {
   Node *n;
   int cmp;
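The subtlest case in maybeDownsize above is a Node4 with exactly one child and no entry: the node disappears by prepending its partial key plus its single index byte onto the child's partial key, so the child alone now covers the old search path. A toy illustration of the resulting byte order, using std::string in place of the raw partial-key buffers (the helper name here is hypothetical):

```cpp
#include <cassert>
#include <string>

// Toy version of the merge in maybeDownsize's Node4 case: the child's new
// partial key is <parent partial key> + <index byte> + <old child partial key>.
std::string mergedPartialKey(const std::string &parentPartialKey,
                             unsigned char indexByte,
                             const std::string &childPartialKey) {
  return parentPartialKey + static_cast<char>(indexByte) + childPartialKey;
}

int main() {
  // The parent covers "ab", the child hangs off byte 'c' and covers "de";
  // after the merge the child alone covers "abcde".
  assert(mergedPartialKey("ab", 'c', "de") == "abcde");
  return 0;
}
```

The real code performs this in place, which is why the child's existing bytes are first memmoved to the right before the parent's bytes and the index byte are copied in front.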
@@ -1562,7 +1712,7 @@ bool checkRangeRead(Node *n, std::span<const uint8_t> begin,
 // Returns a pointer to the newly inserted node. Caller must set
 // `entryPresent`, `entry` fields and `maxVersion` on the result. The search
 // path of the result's parent will have `maxVersion` at least `writeVersion` as
-// a postcondition.
+// a postcondition. Nodes along the search path to `key` may be invalidated.
 template <bool kBegin>
 [[nodiscard]] Node *insert(Node **self, std::span<const uint8_t> key,
                            int64_t writeVersion, NodeAllocators *allocators,
@@ -1689,6 +1839,7 @@ void addWriteRange(Node *&root, int64_t oldestVersion,
     return addPointWrite(root, oldestVersion, begin, writeVersion, allocators,
                          impl);
   }
+  const bool beginIsPrefix = lcp == int(begin.size());
   auto remaining = begin.subspan(0, lcp);
 
   auto *n = root;
@@ -1717,9 +1868,7 @@ void addWriteRange(Node *&root, int64_t oldestVersion,
     n = child;
   }
 
-  Node **useAsRoot = n->parent == nullptr
-                         ? &root
-                         : &getChildExists(n->parent, n->parentsIndex);
+  Node **useAsRoot = &getInTree(n, impl);
 
   int consumed = lcp - remaining.size();
 
@@ -1759,19 +1908,15 @@ void addWriteRange(Node *&root, int64_t oldestVersion,
   }
   endNode->entry.rangeVersion = writeVersion;
 
-  if (insertedEnd) {
-    // beginNode may have been invalidated
+  if (beginIsPrefix && insertedEnd) {
+    // beginNode may have been invalidated when inserting end. TODO can we do
+    // better?
     beginNode = insert<true>(useAsRoot, begin, writeVersion, allocators, impl);
     assert(beginNode->entryPresent);
   }
 
-  for (beginNode = nextLogical(beginNode); beginNode != endNode;) {
-    auto *old = beginNode;
-    beginNode = nextLogical(beginNode);
-    old->entryPresent = false;
-    if (old->numChildren == 0 && old->parent != nullptr) {
-      eraseChild(old->parent, old->parentsIndex, allocators);
-    }
+  for (beginNode = nextLogical(beginNode); beginNode != endNode;
+       beginNode = erase(beginNode, allocators, impl)) {
   }
 }
 
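The rewritten loop relies on erase() (added earlier in this diff) computing nextLogical(self) before it releases anything and returning it, so erasing while iterating needs no bookkeeping in the loop body. The shape is the standard erase-returns-successor idiom, sketched here with std::map rather than this tree:

```cpp
#include <map>

// Drop every entry with key in [begin, end): erase(it) hands back the iterator
// following the removed element, so the loop body stays empty.
void eraseRange(std::map<int, int> &m, int begin, int end) {
  for (auto it = m.lower_bound(begin); it != m.end() && it->first < end;
       it = m.erase(it)) {
  }
}
```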
@@ -1881,34 +2026,33 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
     if (keyUpdates < 100) {
       return;
     }
-    Node *prev = firstGeq(root, removalKey).n;
+    Node *n = firstGeq(root, removalKey).n;
     // There's no way to erase removalKey without introducing a key after it
-    assert(prev != nullptr);
-    for (; keyUpdates > 0; --keyUpdates) {
-      Node *n = nextLogical(prev);
-      if (n == nullptr) {
-        removalKey = {};
-        return;
-      }
-
-      if (std::max(prev->entry.pointVersion, prev->entry.rangeVersion) <=
+    assert(n != nullptr);
+    // Don't erase the root
+    if (n == root) {
+      n = nextLogical(n);
+    }
+    for (; keyUpdates > 0 && n != nullptr; --keyUpdates) {
+      if (std::max(n->entry.pointVersion, n->entry.rangeVersion) <=
           oldestVersion) {
-        // Any transaction prev would have prevented from committing is
+        // Any transaction n would have prevented from committing is
         // going to fail with TooOld anyway.
 
         // There's no way to insert a range such that range version of the right
        // node is greater than the point version of the left node
         assert(n->entry.rangeVersion <= oldestVersion);
-        prev->entryPresent = false;
-        if (prev->numChildren == 0 && prev->parent != nullptr) {
-          eraseChild(prev->parent, prev->parentsIndex, &allocators);
-        }
+        n = erase(n, &allocators, this);
+      } else {
+        n = nextLogical(n);
       }
-
-      prev = n;
     }
+    if (n == nullptr) {
+      removalKey = {};
+      return;
+    }
     removalKeyArena = Arena();
-    removalKey = getSearchPath(removalKeyArena, prev);
+    removalKey = getSearchPath(removalKeyArena, n);
   }
 
   explicit Impl(int64_t oldestVersion) : oldestVersion(oldestVersion) {
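The condition in the rewritten loop is the one the comments spell out: once every version stored on an entry is at or below oldestVersion, any transaction that entry could cause to conflict would already fail with TooOld, so the entry is safe to drop. Stated as a standalone predicate (a paraphrase, not code from this diff):

```cpp
#include <algorithm>
#include <cstdint>

// Paraphrase of the check in the loop above: an entry whose point and range
// versions are both <= oldestVersion can no longer affect any commit that
// would not already fail with TooOld.
bool safeToGarbageCollect(int64_t pointVersion, int64_t rangeVersion,
                          int64_t oldestVersion) {
  return std::max(pointVersion, rangeVersion) <= oldestVersion;
}
```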
@@ -1954,6 +2098,11 @@ int64_t &maxVersion(Node *n, ConflictSet::Impl *impl) {
   }
 }
 
+Node *&getInTree(Node *n, ConflictSet::Impl *impl) {
+  return n->parent == nullptr ? impl->root
+                              : getChildExists(n->parent, n->parentsIndex);
+}
+
 // ==================== END IMPLEMENTATION ====================
 
 // GCOVR_EXCL_START
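getInTree, declared earlier in this diff and defined here, returns a reference to whichever pointer currently holds n: the root slot, or the parent's child slot. That is what lets maybeDownsize splice a replacement node in with a single assignment. A self-contained toy version of the same idiom (toy types, not the library's):

```cpp
#include <cassert>

// Toy analogue of getInTree: return a reference to the pointer that points at
// a node, so a caller can replace the node with one store.
struct ToyNode {
  ToyNode *parent = nullptr;
  ToyNode *child = nullptr; // single child slot, for brevity
};

struct ToyTree {
  ToyNode *root = nullptr;
};

ToyNode *&slotFor(ToyNode *n, ToyTree *t) {
  return n->parent == nullptr ? t->root : n->parent->child;
}

int main() {
  ToyTree t;
  ToyNode a, b;
  t.root = &a;
  slotFor(&a, &t) = &b; // splice b in where a used to hang
  assert(t.root == &b);
}
```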
@@ -2225,6 +2374,48 @@ Iterator firstGeq(Node *n, std::string_view key) {
   return total;
 }
 
+[[maybe_unused]] void checkMemoryBoundInvariants(Node *node, bool &success) {
+  int minNumChildren;
+  switch (node->type) {
+  case Type::Node0:
+    minNumChildren = 0;
+    break;
+  case Type::Node4:
+    minNumChildren = kMinChildrenNode4;
+    break;
+  case Type::Node16:
+    minNumChildren = kMinChildrenNode16;
+    break;
+  case Type::Node48:
+    minNumChildren = kMinChildrenNode48;
+    break;
+  case Type::Node256:
+    minNumChildren = kMinChildrenNode256;
+    break;
+  }
+  if (node->numChildren < minNumChildren) {
+    fprintf(stderr,
+            "%s has %d children, which is less than the minimum required %d\n",
+            getSearchPathPrintable(node).c_str(), node->numChildren,
+            minNumChildren);
+    success = false;
+  }
+  // if (node->numChildren > 0 &&
+  //     node->numChildren * node->partialKeyLen < node->partialKeyCapacity) {
+  //   fprintf(stderr,
+  //           "%s has %d children, partial key length %d, and partial key "
+  //           "capacity %d. It's required that nodes with children have
+  //           children "
+  //           "* length >= capacity\n",
+  //           getSearchPathPrintable(node).c_str(), node->numChildren,
+  //           node->partialKeyLen, node->partialKeyCapacity);
+  //   success = false;
+  // }
+  for (int i = getChildGeq(node, 0); i >= 0; i = getChildGeq(node, i + 1)) {
+    auto *child = getChildExists(node, i);
+    checkMemoryBoundInvariants(child, success);
+  }
+}
 bool checkCorrectness(Node *node, int64_t oldestVersion,
                       ConflictSet::Impl *impl) {
   bool success = true;
@@ -2232,6 +2423,7 @@ bool checkCorrectness(Node *node, int64_t oldestVersion,
   checkParentPointers(node, success);
   checkMaxVersion(node, node, oldestVersion, success, impl);
   checkEntriesExist(node, success);
+  checkMemoryBoundInvariants(node, success);
 
   return success;
 }