Compare commits: v0.0.13...4b82502946 (7 commits)

Commits: 4b82502946, 68bbacb69a, 3078845673, 43f6126cc4, b911d87d55, 0c65a82b78, e024cb8291
Bench.cpp (16 changed lines)
@@ -361,7 +361,21 @@ void benchWorstCaseForRadixRangeRead() {
void benchCreateAndDestroy() {
  ankerl::nanobench::Bench bench;

-  bench.run("create and destroy", [&]() { ConflictSet cs{0}; });
+  bench.run("create and destroy", [&]() {
+    ConflictSet cs{0};
+    ConflictSet::WriteRange w;
+    uint8_t b[9];
+    b[8] = 0;
+    for (int64_t i = 0; i < 1000; i += 7) {
+      auto x = __builtin_bswap64(i);
+      memcpy(b, &x, 8);
+      w.begin.p = b;
+      w.begin.len = 8;
+      w.end.len = 0;
+      w.end.p = b;
+      cs.addWrites(&w, 1, 1);
+    }
+  });
}

int main(void) {
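Aside: the new benchmark encodes each loop counter with __builtin_bswap64 before using it as a key. A minimal standalone sketch (not part of the diff; encodeKey is a hypothetical helper) of the assumption behind that: on a little-endian machine the swap yields big-endian bytes, so memcmp order of the 8-byte keys matches numeric order of the counters.

// Standalone sketch, not part of the diff. Uses the GCC/Clang builtin
// __builtin_bswap64; encodeKey is a hypothetical helper.
#include <cassert>
#include <cstdint>
#include <cstring>

// Encode a counter as an 8-byte big-endian key (on a little-endian machine).
static void encodeKey(int64_t i, uint8_t out[8]) {
  uint64_t x = __builtin_bswap64(static_cast<uint64_t>(i));
  memcpy(out, &x, 8);
}

int main() {
  uint8_t a[8], b[8];
  encodeKey(7, a);
  encodeKey(14, b);
  assert(memcmp(a, b, 8) < 0); // keys sort like the integers
}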
ConflictSet.cpp (507 changed lines)
@@ -224,6 +224,9 @@ struct Node0 : Node {

  void copyChildrenAndKeyFrom(const Node0 &other);
  void copyChildrenAndKeyFrom(const struct Node3 &other);
  void copyChildrenAndKeyFrom(const struct Node16 &other);
  void copyChildrenAndKeyFrom(const struct Node48 &other);
  void copyChildrenAndKeyFrom(const struct Node256 &other);

  size_t size() const { return sizeof(Node0) + getCapacity(); }
};

@@ -240,6 +243,8 @@ struct Node3 : Node {
  void copyChildrenAndKeyFrom(const Node0 &other);
  void copyChildrenAndKeyFrom(const Node3 &other);
  void copyChildrenAndKeyFrom(const struct Node16 &other);
  void copyChildrenAndKeyFrom(const struct Node48 &other);
  void copyChildrenAndKeyFrom(const struct Node256 &other);

  size_t size() const { return sizeof(Node3) + getCapacity(); }
};

@@ -256,6 +261,7 @@ struct Node16 : Node {
  void copyChildrenAndKeyFrom(const Node3 &other);
  void copyChildrenAndKeyFrom(const Node16 &other);
  void copyChildrenAndKeyFrom(const struct Node48 &other);
  void copyChildrenAndKeyFrom(const struct Node256 &other);

  size_t size() const { return sizeof(Node16) + getCapacity(); }
};

@@ -313,6 +319,24 @@ inline void Node0::copyChildrenAndKeyFrom(const Node3 &other) {
  memcpy(partialKey(), &other + 1, partialKeyLen);
}

inline void Node0::copyChildrenAndKeyFrom(const Node16 &other) {
  memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
         kNodeCopySize);
  memcpy(partialKey(), &other + 1, partialKeyLen);
}

inline void Node0::copyChildrenAndKeyFrom(const Node48 &other) {
  memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
         kNodeCopySize);
  memcpy(partialKey(), &other + 1, partialKeyLen);
}

inline void Node0::copyChildrenAndKeyFrom(const Node256 &other) {
  memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
         kNodeCopySize);
  memcpy(partialKey(), &other + 1, partialKeyLen);
}

inline void Node3::copyChildrenAndKeyFrom(const Node0 &other) {
  memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
         kNodeCopySize);
@@ -344,17 +368,53 @@ inline void Node3::copyChildrenAndKeyFrom(const Node16 &other) {
  }
}

inline void Node3::copyChildrenAndKeyFrom(const Node48 &other) {
  memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
         kNodeCopySize);
  int i = 0;
  other.bitSet.forEachSet([&](int c) {
    // Suppress a false positive -Waggressive-loop-optimizations warning
    // in gcc
    assume(i < kMaxNodes);
    index[i] = c;
    children[i] = other.children[other.index[c]];
    childMaxVersion[i] = other.childMaxVersion[other.index[c]];
    assert(children[i]->parent == &other);
    children[i]->parent = this;
    ++i;
  });
  memcpy(partialKey(), &other + 1, partialKeyLen);
}

inline void Node3::copyChildrenAndKeyFrom(const Node256 &other) {
  memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
         kNodeCopySize);
  int i = 0;
  other.bitSet.forEachSet([&](int c) {
    // Suppress a false positive -Waggressive-loop-optimizations warning
    // in gcc
    assume(i < kMaxNodes);
    index[i] = c;
    children[i] = other.children[c];
    childMaxVersion[i] = other.childMaxVersion[c];
    assert(children[i]->parent == &other);
    children[i]->parent = this;
    ++i;
  });
  memcpy(partialKey(), &other + 1, partialKeyLen);
}

inline void Node16::copyChildrenAndKeyFrom(const Node3 &other) {
  memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
         kNodeCopySize);
-  memcpy(index, other.index, Node3::kMaxNodes);
+  memcpy(index, other.index, other.kMaxNodes);
  memcpy(children, other.children,
-         Node3::kMaxNodes * sizeof(children[0])); // NOLINT
+         other.kMaxNodes * sizeof(children[0])); // NOLINT
  memcpy(childMaxVersion, other.childMaxVersion,
-         Node3::kMaxNodes * sizeof(childMaxVersion[0]));
+         other.kMaxNodes * sizeof(childMaxVersion[0]));
  memcpy(partialKey(), &other + 1, partialKeyLen);
-  assert(numChildren == Node3::kMaxNodes);
-  for (int i = 0; i < Node3::kMaxNodes; ++i) {
+  assert(numChildren == other.kMaxNodes);
+  for (int i = 0; i < other.kMaxNodes; ++i) {
    assert(children[i]->parent == &other);
    children[i]->parent = this;
  }
@@ -380,7 +440,7 @@ inline void Node16::copyChildrenAndKeyFrom(const Node48 &other) {
  other.bitSet.forEachSet([&](int c) {
    // Suppress a false positive -Waggressive-loop-optimizations warning
    // in gcc
-    assume(i < Node16::kMaxNodes);
+    assume(i < kMaxNodes);
    index[i] = c;
    children[i] = other.children[other.index[c]];
    childMaxVersion[i] = other.childMaxVersion[other.index[c]];

@@ -391,10 +451,28 @@ inline void Node16::copyChildrenAndKeyFrom(const Node48 &other) {
  memcpy(partialKey(), &other + 1, partialKeyLen);
}

inline void Node16::copyChildrenAndKeyFrom(const Node256 &other) {
  memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
         kNodeCopySize);
  int i = 0;
  other.bitSet.forEachSet([&](int c) {
    // Suppress a false positive -Waggressive-loop-optimizations warning
    // in gcc
    assume(i < kMaxNodes);
    index[i] = c;
    children[i] = other.children[c];
    childMaxVersion[i] = other.childMaxVersion[c];
    assert(children[i]->parent == &other);
    children[i]->parent = this;
    ++i;
  });
  memcpy(partialKey(), &other + 1, partialKeyLen);
}

inline void Node48::copyChildrenAndKeyFrom(const Node16 &other) {
  memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
         kNodeCopySize);
-  assert(numChildren == Node16::kMaxNodes);
+  assert(numChildren == other.kMaxNodes);
  memset(index, -1, sizeof(index));
  memset(children, 0, sizeof(children));
  const auto z = InternalVersionT::zero;

@@ -403,7 +481,7 @@ inline void Node48::copyChildrenAndKeyFrom(const Node16 &other) {
  }
  memcpy(partialKey(), &other + 1, partialKeyLen);
  bitSet.init();
-  nextFree = Node16::kMaxNodes;
+  nextFree = other.kMaxNodes;
  int i = 0;
  for (auto x : other.index) {
    bitSet.set(x);

@@ -413,8 +491,8 @@ inline void Node48::copyChildrenAndKeyFrom(const Node16 &other) {
    assert(children[i]->parent == &other);
    children[i]->parent = this;
    reverseIndex[i] = x;
-    maxOfMax[i >> Node48::kMaxOfMaxShift] =
-        std::max(maxOfMax[i >> Node48::kMaxOfMaxShift], childMaxVersion[i]);
+    maxOfMax[i >> kMaxOfMaxShift] =
+        std::max(maxOfMax[i >> kMaxOfMaxShift], childMaxVersion[i]);
    ++i;
  }
}

@@ -456,15 +534,15 @@ inline void Node48::copyChildrenAndKeyFrom(const Node256 &other) {
  bitSet.forEachSet([&](int c) {
    // Suppress a false positive -Waggressive-loop-optimizations warning
    // in gcc.
-    assume(i < Node48::kMaxNodes);
+    assume(i < kMaxNodes);
    index[c] = i;
    children[i] = other.children[c];
    childMaxVersion[i] = other.childMaxVersion[c];
    assert(children[i]->parent == &other);
    children[i]->parent = this;
    reverseIndex[i] = c;
-    maxOfMax[i >> Node48::kMaxOfMaxShift] =
-        std::max(maxOfMax[i >> Node48::kMaxOfMaxShift], childMaxVersion[i]);
+    maxOfMax[i >> kMaxOfMaxShift] =
+        std::max(maxOfMax[i >> kMaxOfMaxShift], childMaxVersion[i]);
    ++i;
  });
  memcpy(partialKey(), &other + 1, partialKeyLen);

@@ -487,8 +565,8 @@ inline void Node256::copyChildrenAndKeyFrom(const Node48 &other) {
    childMaxVersion[c] = other.childMaxVersion[other.index[c]];
    assert(children[c]->parent == &other);
    children[c]->parent = this;
-    maxOfMax[c >> Node256::kMaxOfMaxShift] =
-        std::max(maxOfMax[c >> Node256::kMaxOfMaxShift], childMaxVersion[c]);
+    maxOfMax[c >> kMaxOfMaxShift] =
+        std::max(maxOfMax[c >> kMaxOfMaxShift], childMaxVersion[c]);
  });
  memcpy(partialKey(), &other + 1, partialKeyLen);
}
@@ -740,6 +818,7 @@ struct WriteContext {
    int64_t nodes_allocated;
    int64_t nodes_released;
    int64_t point_writes;
    int64_t prefix_writes;
    int64_t range_writes;
    int64_t write_bytes;
  } accum;

@@ -1557,6 +1636,7 @@ void maybeDownsize(Node *self, WriteContext *tls, ConflictSet::Impl *impl,
  case Type_Node3: {
    auto *self3 = (Node3 *)self;
    if (self->numChildren == 0) {
      assert(self->entryPresent);
      auto *newSelf = tls->allocate<Node0>(self->partialKeyLen);
      newSelf->copyChildrenAndKeyFrom(*self3);
      getInTree(self, impl) = newSelf;
@@ -1635,10 +1715,242 @@ void maybeDownsize(Node *self, WriteContext *tls, ConflictSet::Impl *impl,
  }
}

void destroyTree(Node *root, WriteContext::Accum *accum) {
  Arena arena;
  auto toFree = vector<Node *>(arena);
  toFree.push_back(root);

#if SHOW_MEMORY
  for (auto *iter = root; iter != nullptr; iter = nextPhysical(iter)) {
    removeNode(iter);
    removeKey(iter);
  }
#endif

  while (toFree.size() > 0) {
    auto *n = toFree.back();
    toFree.pop_back();
    accum->entries_erased += n->entryPresent;
    ++accum->nodes_released;

    switch (n->getType()) {
    case Type_Node0: {
    } break;
    case Type_Node3: {
      auto *n3 = static_cast<Node3 *>(n);
      toFree.append(std::span<Node *>(n3->children, n3->numChildren));
    } break;
    case Type_Node16: {
      auto *n16 = static_cast<Node16 *>(n);
      toFree.append(std::span<Node *>(n16->children, n16->numChildren));
    } break;
    case Type_Node48: {
      auto *n48 = static_cast<Node48 *>(n);
      toFree.append(std::span<Node *>(n48->children, n48->numChildren));
    } break;
    case Type_Node256: {
      auto *n256 = static_cast<Node256 *>(n);
      auto *out = toFree.unsafePrepareAppend(n256->numChildren).data();
      n256->bitSet.forEachSet([&](int i) { *out++ = n256->children[i]; });
      assert(out == toFree.end());
    } break;
    default:                   // GCOVR_EXCL_LINE
      __builtin_unreachable(); // GCOVR_EXCL_LINE
    }
    removeNode(n);
    safe_free(n, n->size());
  }
}

void eraseBetween(Node3 *&n, int begin, int end, WriteContext *tls,
                  ConflictSet::Impl *impl) {
  const unsigned shiftUpperBound = end - begin;
  const unsigned shiftAmount = begin;
  auto inBounds = [&](unsigned c) { return c - shiftAmount < shiftUpperBound; };
  Node **nodeOut = n->children;
  uint8_t *indexOut = n->index;
  InternalVersionT *maxVOut = n->childMaxVersion;
  for (int i = 0; i < n->numChildren; ++i) {
    if (inBounds(n->index[i])) {
      destroyTree(n->children[i], &tls->accum);
    } else {
      *nodeOut++ = n->children[i];
      *indexOut++ = n->index[i];
      *maxVOut++ = n->childMaxVersion[i];
    }
  }
  n->numChildren = nodeOut - n->children;

  if (n->numChildren == 0) {
    auto *newNode = tls->allocate<Node0>(n->partialKeyLen);
    newNode->copyChildrenAndKeyFrom(*n);
    getInTree(n, impl) = newNode;
    tls->release(n);
    (Node *&)n = newNode;
  }
}

void eraseBetween(Node16 *&n, int begin, int end, WriteContext *tls,
                  ConflictSet::Impl *impl) {
  const unsigned shiftUpperBound = end - begin;
  const unsigned shiftAmount = begin;
  auto inBounds = [&](unsigned c) { return c - shiftAmount < shiftUpperBound; };
  Node **nodeOut = n->children;
  uint8_t *indexOut = n->index;
  InternalVersionT *maxVOut = n->childMaxVersion;
  for (int i = 0; i < n->numChildren; ++i) {
    if (inBounds(n->index[i])) {
      destroyTree(n->children[i], &tls->accum);
    } else {
      *nodeOut++ = n->children[i];
      *indexOut++ = n->index[i];
      *maxVOut++ = n->childMaxVersion[i];
    }
  }
  n->numChildren = nodeOut - n->children;

  if (n->numChildren == 0) {
    auto *newNode = tls->allocate<Node0>(n->partialKeyLen);
    newNode->copyChildrenAndKeyFrom(*n);
    getInTree(n, impl) = newNode;
    tls->release(n);
    (Node *&)n = newNode;
  } else if (n->numChildren <= Node3::kMaxNodes) {
    auto *newNode = tls->allocate<Node3>(n->partialKeyLen);
    newNode->copyChildrenAndKeyFrom(*n);
    getInTree(n, impl) = newNode;
    tls->release(n);
    (Node *&)n = newNode;
  }
}

void eraseBetween(Node48 *&n, int begin, int end, WriteContext *tls,
                  ConflictSet::Impl *impl) {
  const unsigned shiftUpperBound = end - begin;
  const unsigned shiftAmount = begin;
  auto inBounds = [&](unsigned c) { return c - shiftAmount < shiftUpperBound; };
  Node **nodeOut = n->children;
  uint8_t *indexOut = n->reverseIndex;
  InternalVersionT *maxVOut = n->childMaxVersion;
  for (auto &v : n->maxOfMax) {
    v = tls->zero;
  }
  n->bitSet = {};
  memset(n->index, -1, sizeof(n->index));
  n->nextFree = 0;
  for (int i = 0; i < n->numChildren; ++i) {
    if (inBounds(n->reverseIndex[i])) {
      destroyTree(n->children[i], &tls->accum);
    } else {
      *nodeOut++ = n->children[i];
      *indexOut++ = n->reverseIndex[i];
      *maxVOut++ = n->childMaxVersion[i];
      n->maxOfMax[i >> Node48::kMaxOfMaxShift] = std::max(
          n->maxOfMax[i >> Node48::kMaxOfMaxShift], n->childMaxVersion[i]);
      n->bitSet.set(n->reverseIndex[i]);
      n->index[n->reverseIndex[i]] = n->nextFree++;
    }
  }
  n->numChildren = n->nextFree;

  if (n->numChildren == 0) {
    auto *newNode = tls->allocate<Node0>(n->partialKeyLen);
    newNode->copyChildrenAndKeyFrom(*n);
    getInTree(n, impl) = newNode;
    tls->release(n);
    (Node *&)n = newNode;
  } else if (n->numChildren <= Node3::kMaxNodes) {
    auto *newNode = tls->allocate<Node3>(n->partialKeyLen);
    newNode->copyChildrenAndKeyFrom(*n);
    getInTree(n, impl) = newNode;
    tls->release(n);
    (Node *&)n = newNode;
  } else if (n->numChildren <= Node16::kMaxNodes) {
    auto *newNode = tls->allocate<Node16>(n->partialKeyLen);
    newNode->copyChildrenAndKeyFrom(*n);
    getInTree(n, impl) = newNode;
    tls->release(n);
    (Node *&)n = newNode;
  }
}

void eraseBetween(Node256 *&n, int begin, int end, WriteContext *tls,
                  ConflictSet::Impl *impl) {
  const unsigned shiftUpperBound = end - begin;
  const unsigned shiftAmount = begin;
  auto inBounds = [&](unsigned c) { return c - shiftAmount < shiftUpperBound; };
  n->numChildren = 0;
  BitSet newBitSet;
  n->bitSet.forEachSet([&](int i) {
    if (inBounds(i)) {
      destroyTree(std::exchange(n->children[i], nullptr), &tls->accum);
    } else {
      ++n->numChildren;
      newBitSet.set(i);
    }
  });
  n->bitSet = newBitSet;
  // Don't need to update childMaxVersion or maxOfMax because of monotonicity
  if (n->numChildren == 0) {
    auto *newNode = tls->allocate<Node0>(n->partialKeyLen);
    newNode->copyChildrenAndKeyFrom(*n);
    getInTree(n, impl) = newNode;
    tls->release(n);
    (Node *&)n = newNode;
  } else if (n->numChildren <= Node3::kMaxNodes) {
    auto *newNode = tls->allocate<Node3>(n->partialKeyLen);
    newNode->copyChildrenAndKeyFrom(*n);
    getInTree(n, impl) = newNode;
    tls->release(n);
    (Node *&)n = newNode;
  } else if (n->numChildren <= Node16::kMaxNodes) {
    auto *newNode = tls->allocate<Node16>(n->partialKeyLen);
    newNode->copyChildrenAndKeyFrom(*n);
    getInTree(n, impl) = newNode;
    tls->release(n);
    (Node *&)n = newNode;
  } else if (n->numChildren <= Node48::kMaxNodes) {
    auto *newNode = tls->allocate<Node48>(n->partialKeyLen);
    newNode->copyChildrenAndKeyFrom(*n);
    getInTree(n, impl) = newNode;
    tls->release(n);
    (Node *&)n = newNode;
  }
}

// Erase all nodes with a search path starting with n + [child],
// where child in [begin, end).
void eraseBetween(Node *&n, int begin, int end, WriteContext *tls,
                  ConflictSet::Impl *impl) {
#if DEBUG_VERBOSE && !defined(NDEBUG)
  fprintf(stderr, "eraseBetween: %s + [%d,%d)\n",
          getSearchPathPrintable(n).c_str(), begin, end);
#endif
  switch (n->getType()) {
  case Type_Node0:
    break;
  case Type_Node3:
    eraseBetween((Node3 *&)n, begin, end, tls, impl);
    break;
  case Type_Node16:
    eraseBetween((Node16 *&)n, begin, end, tls, impl);
    break;
  case Type_Node48:
    eraseBetween((Node48 *&)n, begin, end, tls, impl);
    break;
  case Type_Node256:
    eraseBetween((Node256 *&)n, begin, end, tls, impl);
    break;
  default:                   // GCOVR_EXCL_LINE
    __builtin_unreachable(); // GCOVR_EXCL_LINE
  }
}
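Aside: every eraseBetween overload above uses the same inBounds lambda, which relies on unsigned wrap-around to test begin <= c && c < end with a single comparison. A minimal standalone sketch (not part of the diff) of that equivalence:

// Standalone sketch, not part of the diff. If c < begin, then c - begin wraps
// to a huge unsigned value, which can never be less than end - begin.
#include <cassert>

static bool inBoundsOneCompare(unsigned c, unsigned begin, unsigned end) {
  return c - begin < end - begin; // assumes begin <= end
}

static bool inBoundsTwoCompares(unsigned c, unsigned begin, unsigned end) {
  return begin <= c && c < end;
}

int main() {
  for (unsigned begin = 0; begin <= 8; ++begin)
    for (unsigned end = begin; end <= 8; ++end)
      for (unsigned c = 0; c < 16; ++c)
        assert(inBoundsOneCompare(c, begin, end) ==
               inBoundsTwoCompares(c, begin, end));
}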

// Precondition: self is not the root. May invalidate nodes along the search
// path to self. May invalidate children of self->parent. Returns a pointer to
-// the node after self. If erase invalidates the pointee of `dontInvalidate`, it
-// will update it to its new pointee as well. Precondition: `self->entryPresent`
+// the node after self. If erase invalidates the pointee of `dontInvalidate`,
+// it will update it to its new pointee as well. Precondition:
+// `self->entryPresent`
Node *erase(Node *self, WriteContext *tls, ConflictSet::Impl *impl,
            bool logical, Node *&dontInvalidate) {
  ++tls->accum.entries_erased;
@@ -1892,8 +2204,8 @@ bool checkPrefixRead(Node *n, const std::span<const uint8_t> key,
      remaining = remaining.subspan(commonLen, remaining.size() - commonLen);
    } else if (n->partialKeyLen > int(remaining.size())) {
      // n is the first physical node greater than remaining, and there's no
-      // eq node. All physical nodes that start with prefix are reachable from
-      // n.
+      // eq node. All physical nodes that start with prefix are reachable
+      // from n.
      if (maxVersion(n, impl) > readVersion) {
        return false;
      }

@@ -1942,8 +2254,8 @@ compare16_32bit_avx512(const InternalVersionT *vs, InternalVersionT rv) {
#endif
// GCOVR_EXCL_STOP

-// Returns true if v[i] <= readVersion for all i such that begin <= is[i] < end
-// Preconditions: begin <= end, end - begin < 256
+// Returns true if v[i] <= readVersion for all i such that begin <= is[i] <
+// end Preconditions: begin <= end, end - begin < 256
template <bool kAVX512>
bool scan16(const InternalVersionT *vs, const uint8_t *is, int begin, int end,
            InternalVersionT readVersion) {

@@ -2074,9 +2386,10 @@ bool scan16(const InternalVersionT *vs, int begin, int end,
#endif
}

-// Return whether or not the max version among all keys starting with the search
-// path of n + [child], where child in (begin, end) is <= readVersion. Does not
-// account for the range version of firstGt(searchpath(n) + [end - 1])
+// Return whether or not the max version among all keys starting with the
+// search path of n + [child], where child in (begin, end) is <= readVersion.
+// Does not account for the range version of firstGt(searchpath(n) + [end -
+// 1])
template <bool kAVX512>
bool checkMaxBetweenExclusiveImpl(Node *n, int begin, int end,
                                  InternalVersionT readVersion,

@@ -2245,8 +2558,8 @@ bool checkMaxBetweenExclusiveImpl(Node *n, int begin, int end,
    } else {
      return true;
    }
-    // [begin, end) is now the half-open interval of children we're interested
-    // in.
+    // [begin, end) is now the half-open interval of children we're
+    // interested in.
    assert(begin < end);
  }

@@ -2281,8 +2594,8 @@ bool checkMaxBetweenExclusiveImpl(Node *n, int begin, int end,
    } else {
      return true;
    }
-    // [begin, end) is now the half-open interval of children we're interested
-    // in.
+    // [begin, end) is now the half-open interval of children we're
+    // interested in.
    assert(begin < end);
  }
@@ -2339,7 +2652,8 @@ checkMaxBetweenExclusive(Node *n, int begin, int end,
// GCOVR_EXCL_STOP
__attribute__((target("default")))
#endif
-bool checkMaxBetweenExclusive(Node *n, int begin, int end,
+bool
+checkMaxBetweenExclusive(Node * n, int begin, int end,
                         InternalVersionT readVersion, ReadContext *tls) {
  return checkMaxBetweenExclusiveImpl<false>(n, begin, end, readVersion, tls);
}

@@ -2361,8 +2675,8 @@ Vector<uint8_t> getSearchPath(Arena &arena, Node *n) {
  return result;
} // GCOVR_EXCL_LINE

-// Return true if the max version among all keys that start with key + [child],
-// where begin < child < end, is <= readVersion.
+// Return true if the max version among all keys that start with key +
+// [child], where begin < child < end, is <= readVersion.
//
// Precondition: transitively, no child of n has a search path that's a longer
// prefix of key than n

@@ -2432,8 +2746,8 @@ downLeftSpine:
}

namespace {
-// Return true if the max version among all keys that start with key[:prefixLen]
-// that are >= key is <= readVersion
+// Return true if the max version among all keys that start with
+// key[:prefixLen] that are >= key is <= readVersion
struct CheckRangeLeftSide {
  CheckRangeLeftSide(Node *n, std::span<const uint8_t> key, int prefixLen,
                     InternalVersionT readVersion, ReadContext *tls)

@@ -2547,8 +2861,8 @@ struct CheckRangeLeftSide {
  }
};

-// Return true if the max version among all keys that start with key[:prefixLen]
-// that are < key is <= readVersion
+// Return true if the max version among all keys that start with
+// key[:prefixLen] that are < key is <= readVersion
struct CheckRangeRightSide {
  CheckRangeRightSide(Node *n, std::span<const uint8_t> key, int prefixLen,
                      InternalVersionT readVersion, ReadContext *tls)

@@ -2794,9 +3108,9 @@ checkMaxBetweenExclusiveImpl<true>(Node *n, int begin, int end,
                                   InternalVersionT readVersion, ReadContext *);
#endif

-// Returns a pointer the pointer to the newly inserted node in the tree. Caller
-// must set `entryPresent`, and `entry` fields. All nodes along the search path
-// of the result will have `maxVersion` set to `writeVersion` as a
+// Returns a pointer the pointer to the newly inserted node in the tree.
+// Caller must set `entryPresent`, and `entry` fields. All nodes along the
+// search path of the result will have `maxVersion` set to `writeVersion` as a
// postcondition. Nodes along the search path may be invalidated.
[[nodiscard]]
Node **insert(Node **self, std::span<const uint8_t> key,

@@ -2812,31 +3126,6 @@ Node **insert(Node **self, std::span<const uint8_t> key,
  return self;
}

-void destroyTree(Node *root) {
-  Arena arena;
-  auto toFree = vector<Node *>(arena);
-  toFree.push_back(root);
-
-#if SHOW_MEMORY
-  for (auto *iter = root; iter != nullptr; iter = nextPhysical(iter)) {
-    removeNode(iter);
-    removeKey(iter);
-  }
-#endif
-
-  while (toFree.size() > 0) {
-    auto *n = toFree.back();
-    toFree.pop_back();
-    // Add all children to toFree
-    for (auto c = getChildGeq(n, 0); c != nullptr;
-         c = getChildGeq(n, c->parentsIndex + 1)) {
-      assert(c != nullptr);
-      toFree.push_back(c);
-    }
-    safe_free(n, n->size());
-  }
-}
-
void addPointWrite(Node *&root, std::span<const uint8_t> key,
                   InternalVersionT writeVersion, WriteContext *tls,
                   ConflictSet::Impl *impl) {
@@ -2896,6 +3185,55 @@ void fixupMaxVersion(Node *node, ConflictSet::Impl *impl, WriteContext *tls) {
  setMaxVersion(node, impl, max);
}

void addPrefixWrite(Node *&root, std::span<const uint8_t> begin,
                    std::span<const uint8_t> end, InternalVersionT writeVersion,
                    WriteContext *tls, ConflictSet::Impl *impl) {
  ++tls->accum.prefix_writes;
  int lcp = begin.size() - 1;

  Node **useAsRoot =
      insert(&root, begin.subspan(0, lcp), writeVersion, tls, impl);

  auto *beginNode =
      *insert(useAsRoot, begin.subspan(lcp, 1), writeVersion, tls, impl);

  const bool insertedBegin = !beginNode->entryPresent;

  addKey(beginNode);
  beginNode->entryPresent = true;

  if (insertedBegin) {
    ++tls->accum.entries_inserted;
    auto *p = nextLogical(beginNode);
    beginNode->entry.rangeVersion =
        p == nullptr ? tls->zero : std::max(p->entry.rangeVersion, tls->zero);
    beginNode->entry.pointVersion = writeVersion;
  }
  assert(writeVersion >= beginNode->entry.pointVersion);
  beginNode->entry.pointVersion = writeVersion;

  auto *endNode =
      *insert(useAsRoot, end.subspan(lcp, 1), writeVersion, tls, impl);

  const bool insertedEnd = !endNode->entryPresent;

  addKey(endNode);
  endNode->entryPresent = true;

  if (insertedEnd) {
    ++tls->accum.entries_inserted;
    auto *p = nextLogical(endNode);
    endNode->entry.pointVersion =
        p == nullptr ? tls->zero : std::max(p->entry.rangeVersion, tls->zero);
  }
  endNode->entry.rangeVersion = writeVersion;

  eraseBetween(beginNode, 0, 256, tls, impl);

  // Inserting end trashed endNode's maxVersion. Fix that
  fixupMaxVersion(endNode, impl, tls);
}

void addWriteRange(Node *&root, std::span<const uint8_t> begin,
                   std::span<const uint8_t> end, InternalVersionT writeVersion,
                   WriteContext *tls, ConflictSet::Impl *impl) {

@@ -2906,6 +3244,10 @@ void addWriteRange(Node *&root, std::span<const uint8_t> begin,
      end.back() == 0) {
    return addPointWrite(root, begin, writeVersion, tls, impl);
  }
  if (lcp == int(begin.size() - 1) && end.size() == begin.size() &&
      int(begin.back()) + 1 == int(end.back())) {
    return addPrefixWrite(root, begin, end, writeVersion, tls, impl);
  }
  ++tls->accum.range_writes;
  const bool beginIsPrefix = lcp == int(begin.size());
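Aside: the new dispatch above routes a range to addPrefixWrite exactly when end equals begin with its final byte incremented by one; in that case [begin, end) contains precisely the keys that start with begin. A minimal standalone sketch (not part of the diff; isPrefixRange is a hypothetical helper mirroring that condition):

// Standalone sketch, not part of the diff.
#include <string>

// Hypothetical helper mirroring the condition checked in addWriteRange above.
bool isPrefixRange(const std::string &begin, const std::string &end) {
  if (begin.empty() || begin.size() != end.size())
    return false;
  bool samePrefix =
      begin.compare(0, begin.size() - 1, end, 0, end.size() - 1) == 0;
  bool lastByteBumped =
      int((unsigned char)begin.back()) + 1 == int((unsigned char)end.back());
  return samePrefix && lastByteBumped;
}

int main() {
  // ["ab", "ac") contains exactly the keys that start with "ab".
  return isPrefixRange("ab", "ac") ? 0 : 1;
}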
@@ -3070,7 +3412,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
    if (oldestExtantVersion < writeVersion - kMaxCorrectVersionWindow)
        [[unlikely]] {
      if (writeVersion > newestVersionFullPrecision + kNominalVersionWindow) {
-        destroyTree(root);
+        destroyTree(root, &tls.accum);
        init(writeVersion - kNominalVersionWindow);
      }

@@ -3108,6 +3450,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {

    memory_bytes.set(totalBytes);
    point_writes_total.add(tls.accum.point_writes);
    prefix_writes_total.add(tls.accum.prefix_writes);
    range_writes_total.add(tls.accum.range_writes);
    nodes_allocated_total.add(tls.accum.nodes_allocated);
    nodes_released_total.add(tls.accum.nodes_released);

@@ -3133,17 +3476,17 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
    int64_t set_oldest_iterations_accum = 0;
    for (; fuel > 0 && n != nullptr; ++set_oldest_iterations_accum) {
      rezero(n, oldestVersion);
-      // The "make sure gc keeps up with writes" calculations assume that we're
-      // scanning key by key, not node by node. Make sure we only spend fuel
-      // when there's a logical entry.
+      // The "make sure gc keeps up with writes" calculations assume that
+      // we're scanning key by key, not node by node. Make sure we only spend
+      // fuel when there's a logical entry.
      fuel -= n->entryPresent;
      if (n->entryPresent && std::max(n->entry.pointVersion,
                                      n->entry.rangeVersion) <= oldestVersion) {
        // Any transaction n would have prevented from committing is
        // going to fail with TooOld anyway.

-        // There's no way to insert a range such that range version of the right
-        // node is greater than the point version of the left node
+        // There's no way to insert a range such that range version of the
+        // right node is greater than the point version of the left node
        assert(n->entry.rangeVersion <= oldestVersion);
        Node *dummy = nullptr;
        n = erase(n, &tls, this, /*logical*/ false, dummy);

@@ -3180,9 +3523,9 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
    this->oldestVersion = oldestVersion;
    InternalVersionT::zero = tls.zero = oldestVersion;
#ifdef NDEBUG
-    // This is here for performance reasons, since we want to amortize the cost
-    // of storing the search path as a string. In tests, we want to exercise the
-    // rest of the code often.
+    // This is here for performance reasons, since we want to amortize the
+    // cost of storing the search path as a string. In tests, we want to
+    // exercise the rest of the code often.
    if (keyUpdates < 100) {
      return;
    }

@@ -3238,7 +3581,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
    initMetrics();
  }
  ~Impl() {
-    destroyTree(root);
+    destroyTree(root, &tls.accum);
    safe_free(metrics, metricsCount * sizeof(metrics[0]));
  }

@@ -3306,8 +3649,8 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
          "Total number of checks where the result is \"too old\"");
  COUNTER(check_bytes_total, "Total number of key bytes checked");
  COUNTER(point_writes_total, "Total number of point writes");
-  COUNTER(range_writes_total,
-          "Total number of range writes (includes prefix writes)");
+  COUNTER(prefix_writes_total, "Total number of prefix writes");
+  COUNTER(range_writes_total, "Total number of range writes");
  GAUGE(memory_bytes, "Total number of bytes in use");
  COUNTER(nodes_allocated_total,
          "The total number of physical tree nodes allocated");

@@ -3321,16 +3664,14 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
          "The total number of entries inserted in the tree");
  COUNTER(entries_erased_total,
          "The total number of entries erased from the tree");
-  COUNTER(
-      gc_iterations_total,
-      "The total number of iterations of the main loop for garbage collection");
+  COUNTER(gc_iterations_total, "The total number of iterations of the main "
+                               "loop for garbage collection");
  COUNTER(write_bytes_total, "Total number of key bytes in calls to addWrites");
  GAUGE(oldest_version,
        "The lowest version that doesn't result in \"TooOld\" for checks");
  GAUGE(newest_version, "The version of the most recent call to addWrites");
-  GAUGE(
-      oldest_extant_version,
-      "A lower bound on the lowest version associated with an existing entry");
+  GAUGE(oldest_extant_version, "A lower bound on the lowest version "
+                               "associated with an existing entry");
  // ==================== END METRICS DEFINITIONS ====================
#undef GAUGE
#undef COUNTER

@@ -3705,8 +4046,8 @@ std::string strinc(std::string_view str, bool &ok) {
    if ((uint8_t &)(str[index]) != 255)
      break;

-  // Must not be called with a string that consists only of zero or more '\xff'
-  // bytes.
+  // Must not be called with a string that consists only of zero or more
+  // '\xff' bytes.
  if (index < 0) {
    ok = false;
    return {};
Internal.h (10 changed lines)
@@ -273,6 +273,16 @@ template <class T> struct Vector {
    size_ += slice.size();
  }

  // Caller must write to the returned slice
  std::span<T> unsafePrepareAppend(int appendSize) {
    if (size_ + appendSize > capacity) {
      grow(std::max<int>(size_ + appendSize, capacity * 2));
    }
    auto result = std::span<T>(t + size_, appendSize);
    size_ += appendSize;
    return result;
  }
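Aside: a minimal sketch (not part of the diff; assumes Internal.h's Vector<T> and <span> are in scope) of how a caller is expected to use unsafePrepareAppend, in the same way destroyTree fills the reserved span with a Node256's children: the method grows the buffer and bumps size_, and the caller must then write every element of the returned span.

// Standalone sketch, not part of the diff.
void appendSquares(Vector<int> &v, int count) {
  // Reserve count uninitialized slots at the end of v; size() already counts
  // them, so every slot must be written before it is read.
  std::span<int> out = v.unsafePrepareAppend(count);
  for (int i = 0; i < count; ++i) {
    out[i] = i * i;
  }
}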

  void push_back(const T &t) { append(std::span<const T>(&t, 1)); }

  T *begin() { return t; }