29 Commits

Author SHA1 Message Date
5a132799a4 Add cycles_total
Some checks failed
Tests / Clang total: 2840, passed: 2840
Clang |Total|New|Outstanding|Fixed|Trend |:-:|:-:|:-:|:-:|:-: |0|0|0|0|:clap:
Tests / Debug total: 2838, passed: 2838
weaselab/conflict-set/pipeline/head There was a failure building this commit
2024-08-15 15:13:00 -07:00
72469ebb6e Erase along left spine. Not faster 2024-08-15 15:07:44 -07:00
6c79847a42 Add instructions_total for linux 2024-08-15 15:06:53 -07:00
405a2ca161 Fix typo 2024-08-15 13:52:51 -07:00
f93466316a Pass in-tree reference to mergeWithChild 2024-08-15 13:52:06 -07:00
5626cd09d9 Add to corpus 2024-08-15 11:50:04 -07:00
41840220c3 Optimize version handling in mergeWithChild 2024-08-15 11:49:13 -07:00
7ff00e7846 Extract mergeWithChild to function 2024-08-15 11:40:52 -07:00
6242f40d48 Require that eraseBetween leave at least one child or entryPresent 2024-08-15 11:37:36 -07:00
403d70a1d3 Prefer not copying node in eraseBetween
If numChildren + entryPresent is enough, we don't have to copy even if
it would fit in a smaller node.

If we have to copy, we might as well use the smallest acceptable node
type.
2024-08-15 11:33:16 -07:00
9763452713 Separate beginIsPrefix path and simplify slightly 2024-08-15 11:29:15 -07:00
73d0593fca Remove separate prefix write codepath for now 2024-08-14 21:29:43 -07:00
23c2a3e1c6 SIMD for eraseBetween (Node16)
Some checks failed
Tests / Clang total: 2688, passed: 2688
Clang |Total|New|Outstanding|Fixed|Trend |:-:|:-:|:-:|:-:|:-: |0|0|0|0|:clap:
Tests / Debug total: 2686, passed: 2686
weaselab/conflict-set/pipeline/head There was a failure building this commit
2024-08-14 18:12:46 -07:00
a64e792964 Remove unused function 2024-08-14 17:40:04 -07:00
5e362d5330 Add to corpus 2024-08-14 17:37:18 -07:00
cc526cb6ba Call eraseBetween on useAsRoot in addWriteRange 2024-08-14 17:08:55 -07:00
7e49888bec More eraseBetween optimizations 2024-08-14 16:40:29 -07:00
e64ebabced eraseBetween optimizations 2024-08-14 16:13:37 -07:00
1e34951a77 Fix use-of-uninit in eraseBetween (Node256) 2024-08-14 15:25:10 -07:00
baf64520d6 Have eraseBetween take in-tree node by reference 2024-08-14 15:04:11 -07:00
3499626127 Fix potential strict aliasing issues 2024-08-14 15:01:34 -07:00
b7f9084694 destroyTree -> eraseTree. Use freelist 2024-08-14 14:47:22 -07:00
4b82502946 Accept node by ref for eraseBetween again 2024-08-14 14:43:19 -07:00
68bbacb69a Use getInTree in eraseBetween 2024-08-14 14:43:19 -07:00
3078845673 Fix nodes_released accounting 2024-08-14 14:43:19 -07:00
43f6126cc4 Add a missing assert, call to removeNode 2024-08-14 14:43:19 -07:00
b911d87d55 eraseBetween bug fixes 2024-08-14 14:43:19 -07:00
0c65a82b78 Separate codepath for prefix writes
Uses the newly-added eraseBetween
2024-08-14 14:43:19 -07:00
e024cb8291 Track entriesErased in destroyTree 2024-08-14 14:43:19 -07:00
148 changed files with 609 additions and 184 deletions

View File

@@ -361,7 +361,21 @@ void benchWorstCaseForRadixRangeRead() {
void benchCreateAndDestroy() {
ankerl::nanobench::Bench bench;
bench.run("create and destroy", [&]() { ConflictSet cs{0}; });
bench.run("create and destroy", [&]() {
ConflictSet cs{0};
ConflictSet::WriteRange w;
uint8_t b[9];
b[8] = 0;
for (int64_t i = 0; i < 1000; i += 7) {
auto x = __builtin_bswap64(i);
memcpy(b, &x, 8);
w.begin.p = b;
w.begin.len = 8;
w.end.len = 0;
w.end.p = b;
cs.addWrites(&w, 1, 1);
}
});
}
int main(void) {

View File

@@ -195,7 +195,6 @@ struct Node {
/* end section that's copied to the next node */
uint8_t *partialKey();
size_t size() const;
Type getType() const { return type; }
int32_t getCapacity() const { return partialKeyCapacity; }
@@ -224,6 +223,9 @@ struct Node0 : Node {
void copyChildrenAndKeyFrom(const Node0 &other);
void copyChildrenAndKeyFrom(const struct Node3 &other);
void copyChildrenAndKeyFrom(const struct Node16 &other);
void copyChildrenAndKeyFrom(const struct Node48 &other);
void copyChildrenAndKeyFrom(const struct Node256 &other);
size_t size() const { return sizeof(Node0) + getCapacity(); }
};
@@ -240,6 +242,8 @@ struct Node3 : Node {
void copyChildrenAndKeyFrom(const Node0 &other);
void copyChildrenAndKeyFrom(const Node3 &other);
void copyChildrenAndKeyFrom(const struct Node16 &other);
void copyChildrenAndKeyFrom(const struct Node48 &other);
void copyChildrenAndKeyFrom(const struct Node256 &other);
size_t size() const { return sizeof(Node3) + getCapacity(); }
};
@@ -256,6 +260,7 @@ struct Node16 : Node {
void copyChildrenAndKeyFrom(const Node3 &other);
void copyChildrenAndKeyFrom(const Node16 &other);
void copyChildrenAndKeyFrom(const struct Node48 &other);
void copyChildrenAndKeyFrom(const struct Node256 &other);
size_t size() const { return sizeof(Node16) + getCapacity(); }
};
@@ -313,6 +318,24 @@ inline void Node0::copyChildrenAndKeyFrom(const Node3 &other) {
memcpy(partialKey(), &other + 1, partialKeyLen);
}
// Downsize-copy from a Node16: copies only the shared header span and the
// partial key. Callers only do this when the source has no live children.
inline void Node0::copyChildrenAndKeyFrom(const Node16 &other) {
memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
kNodeCopySize);
// Partial key bytes are stored immediately after the node struct (&other + 1).
memcpy(partialKey(), &other + 1, partialKeyLen);
}
// Downsize-copy from a Node48: copies only the shared header span and the
// partial key. Callers only do this when the source has no live children.
inline void Node0::copyChildrenAndKeyFrom(const Node48 &other) {
memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
kNodeCopySize);
// Partial key bytes are stored immediately after the node struct (&other + 1).
memcpy(partialKey(), &other + 1, partialKeyLen);
}
// Downsize-copy from a Node256: copies only the shared header span and the
// partial key. Callers only do this when the source has no live children.
inline void Node0::copyChildrenAndKeyFrom(const Node256 &other) {
memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
kNodeCopySize);
// Partial key bytes are stored immediately after the node struct (&other + 1).
memcpy(partialKey(), &other + 1, partialKeyLen);
}
inline void Node3::copyChildrenAndKeyFrom(const Node0 &other) {
memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
kNodeCopySize);
@@ -344,17 +367,53 @@ inline void Node3::copyChildrenAndKeyFrom(const Node16 &other) {
}
}
// Shrink a Node48 into a Node3: walk the set child bytes in ascending order
// and densely pack children/versions, repointing each child's parent.
inline void Node3::copyChildrenAndKeyFrom(const Node48 &other) {
memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
kNodeCopySize);
int i = 0;
other.bitSet.forEachSet([&](int c) {
// Suppress a false positive -Waggressive-loop-optimizations warning
// in gcc
assume(i < kMaxNodes);
index[i] = c;
// Node48 addresses children indirectly: child byte c -> index[c] -> slot.
children[i] = other.children[other.index[c]];
childMaxVersion[i] = other.childMaxVersion[other.index[c]];
assert(children[i]->parent == &other);
children[i]->parent = this;
++i;
});
memcpy(partialKey(), &other + 1, partialKeyLen);
}
// Shrink a Node256 into a Node3: walk the set child bytes in ascending order
// and densely pack children/versions, repointing each child's parent.
inline void Node3::copyChildrenAndKeyFrom(const Node256 &other) {
memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
kNodeCopySize);
int i = 0;
other.bitSet.forEachSet([&](int c) {
// Suppress a false positive -Waggressive-loop-optimizations warning
// in gcc
assume(i < kMaxNodes);
index[i] = c;
// Node256 addresses children directly by child byte.
children[i] = other.children[c];
childMaxVersion[i] = other.childMaxVersion[c];
assert(children[i]->parent == &other);
children[i]->parent = this;
++i;
});
memcpy(partialKey(), &other + 1, partialKeyLen);
}
inline void Node16::copyChildrenAndKeyFrom(const Node3 &other) {
memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
kNodeCopySize);
memcpy(index, other.index, Node3::kMaxNodes);
memcpy(index, other.index, other.kMaxNodes);
memcpy(children, other.children,
Node3::kMaxNodes * sizeof(children[0])); // NOLINT
other.kMaxNodes * sizeof(children[0])); // NOLINT
memcpy(childMaxVersion, other.childMaxVersion,
Node3::kMaxNodes * sizeof(childMaxVersion[0]));
other.kMaxNodes * sizeof(childMaxVersion[0]));
memcpy(partialKey(), &other + 1, partialKeyLen);
assert(numChildren == Node3::kMaxNodes);
for (int i = 0; i < Node3::kMaxNodes; ++i) {
assert(numChildren == other.kMaxNodes);
for (int i = 0; i < other.kMaxNodes; ++i) {
assert(children[i]->parent == &other);
children[i]->parent = this;
}
@@ -380,7 +439,7 @@ inline void Node16::copyChildrenAndKeyFrom(const Node48 &other) {
other.bitSet.forEachSet([&](int c) {
// Suppress a false positive -Waggressive-loop-optimizations warning
// in gcc
assume(i < Node16::kMaxNodes);
assume(i < kMaxNodes);
index[i] = c;
children[i] = other.children[other.index[c]];
childMaxVersion[i] = other.childMaxVersion[other.index[c]];
@@ -391,10 +450,28 @@ inline void Node16::copyChildrenAndKeyFrom(const Node48 &other) {
memcpy(partialKey(), &other + 1, partialKeyLen);
}
// Shrink a Node256 into a Node16: walk the set child bytes in ascending order
// and densely pack children/versions, repointing each child's parent.
inline void Node16::copyChildrenAndKeyFrom(const Node256 &other) {
memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
kNodeCopySize);
int i = 0;
other.bitSet.forEachSet([&](int c) {
// Suppress a false positive -Waggressive-loop-optimizations warning
// in gcc
assume(i < kMaxNodes);
index[i] = c;
// Node256 addresses children directly by child byte.
children[i] = other.children[c];
childMaxVersion[i] = other.childMaxVersion[c];
assert(children[i]->parent == &other);
children[i]->parent = this;
++i;
});
memcpy(partialKey(), &other + 1, partialKeyLen);
}
inline void Node48::copyChildrenAndKeyFrom(const Node16 &other) {
memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
kNodeCopySize);
assert(numChildren == Node16::kMaxNodes);
assert(numChildren == other.kMaxNodes);
memset(index, -1, sizeof(index));
memset(children, 0, sizeof(children));
const auto z = InternalVersionT::zero;
@@ -403,7 +480,7 @@ inline void Node48::copyChildrenAndKeyFrom(const Node16 &other) {
}
memcpy(partialKey(), &other + 1, partialKeyLen);
bitSet.init();
nextFree = Node16::kMaxNodes;
nextFree = other.kMaxNodes;
int i = 0;
for (auto x : other.index) {
bitSet.set(x);
@@ -413,8 +490,8 @@ inline void Node48::copyChildrenAndKeyFrom(const Node16 &other) {
assert(children[i]->parent == &other);
children[i]->parent = this;
reverseIndex[i] = x;
maxOfMax[i >> Node48::kMaxOfMaxShift] =
std::max(maxOfMax[i >> Node48::kMaxOfMaxShift], childMaxVersion[i]);
maxOfMax[i >> kMaxOfMaxShift] =
std::max(maxOfMax[i >> kMaxOfMaxShift], childMaxVersion[i]);
++i;
}
}
@@ -456,15 +533,15 @@ inline void Node48::copyChildrenAndKeyFrom(const Node256 &other) {
bitSet.forEachSet([&](int c) {
// Suppress a false positive -Waggressive-loop-optimizations warning
// in gcc.
assume(i < Node48::kMaxNodes);
assume(i < kMaxNodes);
index[c] = i;
children[i] = other.children[c];
childMaxVersion[i] = other.childMaxVersion[c];
assert(children[i]->parent == &other);
children[i]->parent = this;
reverseIndex[i] = c;
maxOfMax[i >> Node48::kMaxOfMaxShift] =
std::max(maxOfMax[i >> Node48::kMaxOfMaxShift], childMaxVersion[i]);
maxOfMax[i >> kMaxOfMaxShift] =
std::max(maxOfMax[i >> kMaxOfMaxShift], childMaxVersion[i]);
++i;
});
memcpy(partialKey(), &other + 1, partialKeyLen);
@@ -487,8 +564,8 @@ inline void Node256::copyChildrenAndKeyFrom(const Node48 &other) {
childMaxVersion[c] = other.childMaxVersion[other.index[c]];
assert(children[c]->parent == &other);
children[c]->parent = this;
maxOfMax[c >> Node256::kMaxOfMaxShift] =
std::max(maxOfMax[c >> Node256::kMaxOfMaxShift], childMaxVersion[c]);
maxOfMax[c >> kMaxOfMaxShift] =
std::max(maxOfMax[c >> kMaxOfMaxShift], childMaxVersion[c]);
});
memcpy(partialKey(), &other + 1, partialKeyLen);
}
@@ -693,23 +770,6 @@ uint8_t *Node::partialKey() {
}
}
// Size dispatch on the type tag: each concrete node type reports
// sizeof(concrete type) + its partial-key capacity.
size_t Node::size() const {
switch (type) {
case Type_Node0:
return ((Node0 *)this)->size();
case Type_Node3:
return ((Node3 *)this)->size();
case Type_Node16:
return ((Node16 *)this)->size();
case Type_Node48:
return ((Node48 *)this)->size();
case Type_Node256:
return ((Node256 *)this)->size();
default: // GCOVR_EXCL_LINE
__builtin_unreachable(); // GCOVR_EXCL_LINE
}
}
// A type that's plumbed along the check call tree. Lifetime ends after each
// check call.
struct ReadContext {
@@ -1544,24 +1604,9 @@ void rezero(Node *n, InternalVersionT z) {
}
}
void maybeDownsize(Node *self, WriteContext *tls, ConflictSet::Impl *impl,
Node *&dontInvalidate) {
#if DEBUG_VERBOSE && !defined(NDEBUG)
fprintf(stderr, "maybeDownsize: %s\n", getSearchPathPrintable(self).c_str());
#endif
switch (self->getType()) {
case Type_Node0: // GCOVR_EXCL_LINE
__builtin_unreachable(); // GCOVR_EXCL_LINE
case Type_Node3: {
auto *self3 = (Node3 *)self;
if (self->numChildren == 0) {
auto *newSelf = tls->allocate<Node0>(self->partialKeyLen);
newSelf->copyChildrenAndKeyFrom(*self3);
getInTree(self, impl) = newSelf;
tls->release(self3);
} else if (self->numChildren == 1 && !self->entryPresent) {
void mergeWithChild(Node *&self, WriteContext *tls, ConflictSet::Impl *impl,
Node *&dontInvalidate, Node3 *self3) {
assert(!self3->entryPresent);
auto *child = self3->children[0];
int minCapacity = self3->partialKeyLen + 1 + child->partialKeyLen;
@@ -1575,16 +1620,15 @@ void maybeDownsize(Node *self, WriteContext *tls, ConflictSet::Impl *impl,
// Merge partial key with child
#if DEBUG_VERBOSE && !defined(NDEBUG)
fprintf(stderr, "Merge %s into %s\n",
getSearchPathPrintable(self).c_str(),
fprintf(stderr, "Merge %s into %s\n", getSearchPathPrintable(self).c_str(),
getSearchPathPrintable(child).c_str());
#endif
InternalVersionT childMaxVersion = maxVersion(child, impl);
InternalVersionT childMaxVersion = self3->childMaxVersion[0];
// Construct new partial key for child
memmove(child->partialKey() + self3->partialKeyLen + 1,
child->partialKey(), child->partialKeyLen);
memmove(child->partialKey() + self3->partialKeyLen + 1, child->partialKey(),
child->partialKeyLen);
memcpy(child->partialKey(), self3->partialKey(), self->partialKeyLen);
child->partialKey()[self3->partialKeyLen] = self3->index[0];
child->partialKeyLen += 1 + self3->partialKeyLen;
@@ -1594,13 +1638,32 @@ void maybeDownsize(Node *self, WriteContext *tls, ConflictSet::Impl *impl,
// Max versions are stored in the parent, so we need to update it now
// that we have a new parent.
setMaxVersion(child, impl, childMaxVersion);
if (child->parent) {
rezero(child->parent, tls->zero);
}
setMaxVersion(child, impl, std::max(childMaxVersion, tls->zero));
getInTree(self, impl) = child;
self = child;
tls->release(self3);
}
void maybeDownsize(Node *self, WriteContext *tls, ConflictSet::Impl *impl,
Node *&dontInvalidate) {
#if DEBUG_VERBOSE && !defined(NDEBUG)
fprintf(stderr, "maybeDownsize: %s\n", getSearchPathPrintable(self).c_str());
#endif
switch (self->getType()) {
case Type_Node0: // GCOVR_EXCL_LINE
__builtin_unreachable(); // GCOVR_EXCL_LINE
case Type_Node3: {
auto *self3 = (Node3 *)self;
if (self->numChildren == 0) {
assert(self->entryPresent);
auto *newSelf = tls->allocate<Node0>(self->partialKeyLen);
newSelf->copyChildrenAndKeyFrom(*self3);
getInTree(self, impl) = newSelf;
tls->release(self3);
} else if (self->numChildren == 1 && !self->entryPresent) {
mergeWithChild(getInTree(self, impl), tls, impl, dontInvalidate, self3);
}
} break;
case Type_Node16:
@@ -1635,10 +1698,266 @@ void maybeDownsize(Node *self, WriteContext *tls, ConflictSet::Impl *impl,
}
}
// Release the entire subtree rooted at `root` back to the freelist via
// tls->release, updating the erase/release accounting. Uses an explicit
// stack instead of recursion so arbitrarily deep trees are safe.
void eraseTree(Node *root, WriteContext *tls) {
Arena arena;
auto toFree = vector<Node *>(arena);
toFree.push_back(root);
while (toFree.size() > 0) {
auto *n = toFree.back();
toFree.pop_back();
// Account for the logical entry (if any) and the physical node.
tls->accum.entries_erased += n->entryPresent;
++tls->accum.nodes_released;
removeNode(n);
removeKey(n);
// Push n's children before releasing n; release is type-specific so the
// freelist sees the node's concrete type.
switch (n->getType()) {
case Type_Node0: {
auto *n0 = static_cast<Node0 *>(n);
tls->release(n0);
} break;
case Type_Node3: {
auto *n3 = static_cast<Node3 *>(n);
toFree.append(std::span<Node *>(n3->children, n3->numChildren));
tls->release(n3);
} break;
case Type_Node16: {
auto *n16 = static_cast<Node16 *>(n);
toFree.append(std::span<Node *>(n16->children, n16->numChildren));
tls->release(n16);
} break;
case Type_Node48: {
auto *n48 = static_cast<Node48 *>(n);
toFree.append(std::span<Node *>(n48->children, n48->numChildren));
tls->release(n48);
} break;
case Type_Node256: {
auto *n256 = static_cast<Node256 *>(n);
// Node256 stores children sparsely; gather the set slots directly into
// space reserved at the end of toFree.
auto *out = toFree.unsafePrepareAppend(n256->numChildren).data();
n256->bitSet.forEachSet([&](int i) { *out++ = n256->children[i]; });
assert(out == toFree.end());
tls->release(n256);
} break;
default: // GCOVR_EXCL_LINE
__builtin_unreachable(); // GCOVR_EXCL_LINE
}
}
}
// Erase every child of `n` whose index byte is in [begin, end), compacting
// the survivors in place. Downsizes to Node0 only when no children remain;
// the caller guarantees the result keeps a child or entryPresent.
void eraseBetween(Node **inTree, Node3 *n, int begin, int end,
WriteContext *tls) {
// Unsigned-subtract trick: c is in [begin, end) iff c - begin < end - begin.
const unsigned shiftUpperBound = end - begin;
const unsigned shiftAmount = begin;
auto inBounds = [&](unsigned c) { return c - shiftAmount < shiftUpperBound; };
// Three write cursors compact the parallel arrays in a single pass.
Node **nodeOut = n->children;
uint8_t *indexOut = n->index;
InternalVersionT *maxVOut = n->childMaxVersion;
for (int i = 0; i < n->numChildren; ++i) {
if (inBounds(n->index[i])) {
eraseTree(n->children[i], tls);
} else {
*nodeOut++ = n->children[i];
*indexOut++ = n->index[i];
*maxVOut++ = n->childMaxVersion[i];
}
}
n->numChildren = nodeOut - n->children;
if (n->numChildren == 0) {
auto *newNode = tls->allocate<Node0>(n->partialKeyLen);
newNode->copyChildrenAndKeyFrom(*n);
tls->release(n);
*inTree = newNode;
}
}
// Erase every child of `n` whose index byte falls in [begin, end), releasing
// the corresponding subtrees. If the surviving child count (plus the node's
// own entry) no longer justifies a Node16, the node is downsized to a Node3
// or Node0 and *inTree is repointed at the replacement.
void eraseBetween(Node **inTree, Node16 *n, int begin, int end,
                  WriteContext *tls) {
  // Full-range erase: drop every child and collapse straight to Node0.
  if (end - begin == 256) {
    for (int i = 0; i < n->numChildren; ++i) {
      eraseTree(n->children[i], tls);
    }
    n->numChildren = 0;
    auto *newNode = tls->allocate<Node0>(n->partialKeyLen);
    newNode->copyChildrenAndKeyFrom(*n);
    tls->release(n);
    *inTree = newNode;
    return;
  }
  assert(end - begin < 256);
  // Build a mask of the index slots whose byte is in [begin, end). The
  // unsigned-subtract trick (c - begin < end - begin) tests the half-open
  // interval with a single compare per lane.
#ifdef HAS_ARM_NEON
  uint8x16_t indices;
  memcpy(&indices, n->index, 16);
  // 0xff for each in bounds
  auto results =
      vcltq_u8(vsubq_u8(indices, vdupq_n_u8(begin)), vdupq_n_u8(end - begin));
  // 0xf for each 0xff
  uint64_t mask = vget_lane_u64(
      vreinterpret_u64_u8(vshrn_n_u16(vreinterpretq_u16_u8(results), 4)), 0);
  // NOTE(review): the narrowing shift yields 4 mask bits per child, so the
  // numChildren limit mask, countr_zero, and popcount below look like they
  // need scaling by 4 on this path — confirm against the upstream source.
#elif defined(HAS_AVX)
  __m128i indices;
  memcpy(&indices, n->index, 16);
  indices = _mm_sub_epi8(indices, _mm_set1_epi8(begin));
  // max_epu8(x, limit) == x exactly when x >= limit, i.e. out of bounds;
  // inverting movemask leaves one bit per in-bounds lane.
  uint32_t mask = ~_mm_movemask_epi8(_mm_cmpeq_epi8(
      indices, _mm_max_epu8(indices, _mm_set1_epi8(end - begin))));
#else
  const unsigned shiftUpperBound = end - begin;
  const unsigned shiftAmount = begin;
  auto inBounds = [&](unsigned c) { return c - shiftAmount < shiftUpperBound; };
  uint32_t mask = 0;
  for (int i = 0; i < 16; ++i) {
    // Fix: read via n->index (this previously referenced an undeclared `is`,
    // which did not compile on the scalar fallback path).
    mask |= inBounds(n->index[i]) << i;
  }
#endif
  // Ignore lanes beyond the populated prefix of the index array.
  mask &= (decltype(mask)(1) << n->numChildren) - 1;
  if (!mask) {
    return;
  }
  // index[] is maintained in ascending order, so the in-bounds children form
  // one contiguous run [first, first + count).
  int first = std::countr_zero(mask);
  int count = std::popcount(mask);
  n->numChildren -= count;
  for (int i = first; i < first + count; ++i) {
    eraseTree(n->children[i], tls);
  }
  // Compact the survivors over the erased run.
  for (int i = first; i < n->numChildren; ++i) {
    n->children[i] = n->children[i + count];
    n->childMaxVersion[i] = n->childMaxVersion[i + count];
    n->index[i] = n->index[i + count];
  }
  if (n->numChildren + n->entryPresent >= kMinChildrenNode16) {
    // nop
  } else if (n->numChildren > 0) {
    auto *newNode = tls->allocate<Node3>(n->partialKeyLen);
    newNode->copyChildrenAndKeyFrom(*n);
    tls->release(n);
    *inTree = newNode;
  } else {
    auto *newNode = tls->allocate<Node0>(n->partialKeyLen);
    newNode->copyChildrenAndKeyFrom(*n);
    tls->release(n);
    *inTree = newNode;
  }
}
// Erase every child of `n` with index byte in [begin, end). Node48 stores
// children in a dense array addressed through index[]; removal swaps the last
// dense slot into the vacated one, then the node is downsized if too small.
void eraseBetween(Node **inTree, Node48 *n, int begin, int end,
WriteContext *tls) {
// firstSetGeq walks the set child bytes; after reset(i) the re-query at i
// yields the next set byte >= i.
for (int i = n->bitSet.firstSetGeq(begin); i >= 0 && i < end;
i = n->bitSet.firstSetGeq(i)) {
n->bitSet.reset(i);
int8_t toRemoveChildrenIndex = std::exchange(n->index[i], -1);
int8_t lastChildrenIndex = --n->nextFree;
assert(toRemoveChildrenIndex >= 0);
assert(lastChildrenIndex >= 0);
eraseTree(n->children[toRemoveChildrenIndex], tls);
if (toRemoveChildrenIndex != lastChildrenIndex) {
// Move the last dense entry into the hole, patch both mappings
// (index[] and reverseIndex[]), and bump the conservative maxOfMax
// summary for the destination group.
n->children[toRemoveChildrenIndex] = n->children[lastChildrenIndex];
n->childMaxVersion[toRemoveChildrenIndex] =
n->childMaxVersion[lastChildrenIndex];
n->maxOfMax[toRemoveChildrenIndex >> Node48::kMaxOfMaxShift] =
std::max(n->maxOfMax[toRemoveChildrenIndex >> Node48::kMaxOfMaxShift],
n->childMaxVersion[toRemoveChildrenIndex]);
auto parentIndex = n->children[toRemoveChildrenIndex]->parentsIndex;
n->index[parentIndex] = toRemoveChildrenIndex;
n->reverseIndex[toRemoveChildrenIndex] = parentIndex;
}
// The vacated last slot no longer holds a child; reset its version.
n->childMaxVersion[lastChildrenIndex] = tls->zero;
--n->numChildren;
}
// Downsize to the smallest node type that still fits the survivors.
if (n->numChildren + n->entryPresent >= kMinChildrenNode48) {
// nop
} else if (n->numChildren > Node3::kMaxNodes) {
auto *newNode = tls->allocate<Node16>(n->partialKeyLen);
newNode->copyChildrenAndKeyFrom(*n);
tls->release(n);
*inTree = newNode;
} else if (n->numChildren > 0) {
auto *newNode = tls->allocate<Node3>(n->partialKeyLen);
newNode->copyChildrenAndKeyFrom(*n);
tls->release(n);
*inTree = newNode;
} else {
auto *newNode = tls->allocate<Node0>(n->partialKeyLen);
newNode->copyChildrenAndKeyFrom(*n);
tls->release(n);
*inTree = newNode;
}
}
// Erase every child of `n` with index byte in [begin, end). Node256 addresses
// children directly by byte, so each erased slot is nulled and its bit
// cleared; afterwards the node is downsized if too few children remain.
void eraseBetween(Node **inTree, Node256 *n, int begin, int end,
WriteContext *tls) {
// firstSetGeq walks the set child bytes; after reset(i) the re-query at i
// yields the next set byte >= i.
for (int i = n->bitSet.firstSetGeq(begin); i >= 0 && i < end;
i = n->bitSet.firstSetGeq(i)) {
assert(n->children[i] != nullptr);
eraseTree(std::exchange(n->children[i], nullptr), tls);
n->bitSet.reset(i);
--n->numChildren;
}
// Downsize to the smallest node type that still fits the survivors.
if (n->numChildren + n->entryPresent >= kMinChildrenNode256) {
// nop
} else if (n->numChildren > Node16::kMaxNodes) {
auto *newNode = tls->allocate<Node48>(n->partialKeyLen);
newNode->copyChildrenAndKeyFrom(*n);
tls->release(n);
*inTree = newNode;
} else if (n->numChildren > Node3::kMaxNodes) {
auto *newNode = tls->allocate<Node16>(n->partialKeyLen);
newNode->copyChildrenAndKeyFrom(*n);
tls->release(n);
*inTree = newNode;
} else if (n->numChildren > 0) {
auto *newNode = tls->allocate<Node3>(n->partialKeyLen);
newNode->copyChildrenAndKeyFrom(*n);
tls->release(n);
*inTree = newNode;
} else {
auto *newNode = tls->allocate<Node0>(n->partialKeyLen);
newNode->copyChildrenAndKeyFrom(*n);
tls->release(n);
*inTree = newNode;
}
}
// Erase all nodes with a search path starting with n + [child],
// where child in [begin, end). To avoid the need to propagate erases up the
// search path, the caller must ensure that the result has at least one child or
// has entryPresent.
void eraseBetween(Node *&n, int begin, int end, WriteContext *tls) {
#if DEBUG_VERBOSE && !defined(NDEBUG)
fprintf(stderr, "eraseBetween: %s + [%d,%d)\n",
getSearchPathPrintable(n).c_str(), begin, end);
#endif
// Dispatch on the concrete node type. Each overload may replace the node
// with a smaller one (downsizing), which is why `n` is taken by reference.
switch (n->getType()) {
case Type_Node0:
// A Node0 has no children; nothing to erase.
break;
case Type_Node3:
eraseBetween(&n, (Node3 *)n, begin, end, tls);
break;
case Type_Node16:
eraseBetween(&n, (Node16 *)n, begin, end, tls);
break;
case Type_Node48:
eraseBetween(&n, (Node48 *)n, begin, end, tls);
break;
case Type_Node256:
eraseBetween(&n, (Node256 *)n, begin, end, tls);
break;
default: // GCOVR_EXCL_LINE
__builtin_unreachable(); // GCOVR_EXCL_LINE
}
// Enforce the caller's contract stated above.
assert(n->numChildren > 0 || n->entryPresent);
}
// Precondition: self is not the root. May invalidate nodes along the search
// path to self. May invalidate children of self->parent. Returns a pointer to
// the node after self. If erase invalidates the pointee of `dontInvalidate`, it
// will update it to its new pointee as well. Precondition: `self->entryPresent`
// the node after self. If erase invalidates the pointee of `dontInvalidate`,
// it will update it to its new pointee as well. Precondition:
// `self->entryPresent`
Node *erase(Node *self, WriteContext *tls, ConflictSet::Impl *impl,
bool logical, Node *&dontInvalidate) {
++tls->accum.entries_erased;
@@ -1892,8 +2211,8 @@ bool checkPrefixRead(Node *n, const std::span<const uint8_t> key,
remaining = remaining.subspan(commonLen, remaining.size() - commonLen);
} else if (n->partialKeyLen > int(remaining.size())) {
// n is the first physical node greater than remaining, and there's no
// eq node. All physical nodes that start with prefix are reachable from
// n.
// eq node. All physical nodes that start with prefix are reachable
// from n.
if (maxVersion(n, impl) > readVersion) {
return false;
}
@@ -1942,8 +2261,8 @@ compare16_32bit_avx512(const InternalVersionT *vs, InternalVersionT rv) {
#endif
// GCOVR_EXCL_STOP
// Returns true if v[i] <= readVersion for all i such that begin <= is[i] < end
// Preconditions: begin <= end, end - begin < 256
// Returns true if v[i] <= readVersion for all i such that begin <= is[i] <
// end Preconditions: begin <= end, end - begin < 256
template <bool kAVX512>
bool scan16(const InternalVersionT *vs, const uint8_t *is, int begin, int end,
InternalVersionT readVersion) {
@@ -2074,9 +2393,10 @@ bool scan16(const InternalVersionT *vs, int begin, int end,
#endif
}
// Return whether or not the max version among all keys starting with the search
// path of n + [child], where child in (begin, end) is <= readVersion. Does not
// account for the range version of firstGt(searchpath(n) + [end - 1])
// Return whether or not the max version among all keys starting with the
// search path of n + [child], where child in (begin, end) is <= readVersion.
// Does not account for the range version of firstGt(searchpath(n) + [end -
// 1])
template <bool kAVX512>
bool checkMaxBetweenExclusiveImpl(Node *n, int begin, int end,
InternalVersionT readVersion,
@@ -2245,8 +2565,8 @@ bool checkMaxBetweenExclusiveImpl(Node *n, int begin, int end,
} else {
return true;
}
// [begin, end) is now the half-open interval of children we're interested
// in.
// [begin, end) is now the half-open interval of children we're
// interested in.
assert(begin < end);
}
@@ -2281,8 +2601,8 @@ bool checkMaxBetweenExclusiveImpl(Node *n, int begin, int end,
} else {
return true;
}
// [begin, end) is now the half-open interval of children we're interested
// in.
// [begin, end) is now the half-open interval of children we're
// interested in.
assert(begin < end);
}
@@ -2339,7 +2659,8 @@ checkMaxBetweenExclusive(Node *n, int begin, int end,
// GCOVR_EXCL_STOP
__attribute__((target("default")))
#endif
bool checkMaxBetweenExclusive(Node *n, int begin, int end,
bool
checkMaxBetweenExclusive(Node * n, int begin, int end,
InternalVersionT readVersion, ReadContext *tls) {
return checkMaxBetweenExclusiveImpl<false>(n, begin, end, readVersion, tls);
}
@@ -2361,8 +2682,8 @@ Vector<uint8_t> getSearchPath(Arena &arena, Node *n) {
return result;
} // GCOVR_EXCL_LINE
// Return true if the max version among all keys that start with key + [child],
// where begin < child < end, is <= readVersion.
// Return true if the max version among all keys that start with key +
// [child], where begin < child < end, is <= readVersion.
//
// Precondition: transitively, no child of n has a search path that's a longer
// prefix of key than n
@@ -2432,8 +2753,8 @@ downLeftSpine:
}
namespace {
// Return true if the max version among all keys that start with key[:prefixLen]
// that are >= key is <= readVersion
// Return true if the max version among all keys that start with
// key[:prefixLen] that are >= key is <= readVersion
struct CheckRangeLeftSide {
CheckRangeLeftSide(Node *n, std::span<const uint8_t> key, int prefixLen,
InternalVersionT readVersion, ReadContext *tls)
@@ -2547,8 +2868,8 @@ struct CheckRangeLeftSide {
}
};
// Return true if the max version among all keys that start with key[:prefixLen]
// that are < key is <= readVersion
// Return true if the max version among all keys that start with
// key[:prefixLen] that are < key is <= readVersion
struct CheckRangeRightSide {
CheckRangeRightSide(Node *n, std::span<const uint8_t> key, int prefixLen,
InternalVersionT readVersion, ReadContext *tls)
@@ -2794,9 +3115,9 @@ checkMaxBetweenExclusiveImpl<true>(Node *n, int begin, int end,
InternalVersionT readVersion, ReadContext *);
#endif
// Returns a pointer the pointer to the newly inserted node in the tree. Caller
// must set `entryPresent`, and `entry` fields. All nodes along the search path
// of the result will have `maxVersion` set to `writeVersion` as a
// Returns a pointer the pointer to the newly inserted node in the tree.
// Caller must set `entryPresent`, and `entry` fields. All nodes along the
// search path of the result will have `maxVersion` set to `writeVersion` as a
// postcondition. Nodes along the search path may be invalidated.
[[nodiscard]]
Node **insert(Node **self, std::span<const uint8_t> key,
@@ -2812,31 +3133,6 @@ Node **insert(Node **self, std::span<const uint8_t> key,
return self;
}
// Free every node in the subtree rooted at `root` with safe_free, using an
// explicit stack instead of recursion. Unlike eraseTree this bypasses the
// freelist and keeps no erase accounting.
void destroyTree(Node *root) {
Arena arena;
auto toFree = vector<Node *>(arena);
toFree.push_back(root);
#if SHOW_MEMORY
// Memory-tracking builds walk the whole tree up front to update counters.
for (auto *iter = root; iter != nullptr; iter = nextPhysical(iter)) {
removeNode(iter);
removeKey(iter);
}
#endif
while (toFree.size() > 0) {
auto *n = toFree.back();
toFree.pop_back();
// Add all children to toFree
for (auto c = getChildGeq(n, 0); c != nullptr;
c = getChildGeq(n, c->parentsIndex + 1)) {
assert(c != nullptr);
toFree.push_back(c);
}
safe_free(n, n->size());
}
}
void addPointWrite(Node *&root, std::span<const uint8_t> key,
InternalVersionT writeVersion, WriteContext *tls,
ConflictSet::Impl *impl) {
@@ -2911,46 +3207,31 @@ void addWriteRange(Node *&root, std::span<const uint8_t> begin,
Node **useAsRoot =
insert(&root, begin.subspan(0, lcp), writeVersion, tls, impl);
begin = begin.subspan(lcp, begin.size() - lcp);
end = end.subspan(lcp, end.size() - lcp);
int consumed = lcp;
begin = begin.subspan(consumed, begin.size() - consumed);
end = end.subspan(consumed, end.size() - consumed);
auto *beginNode = *insert(useAsRoot, begin, writeVersion, tls, impl);
const bool insertedBegin = !beginNode->entryPresent;
if (beginIsPrefix) {
auto *beginNode = *useAsRoot;
addKey(beginNode);
beginNode->entryPresent = true;
if (insertedBegin) {
if (!beginNode->entryPresent) {
++tls->accum.entries_inserted;
auto *p = nextLogical(beginNode);
beginNode->entry.rangeVersion =
p == nullptr ? tls->zero : std::max(p->entry.rangeVersion, tls->zero);
beginNode->entry.pointVersion = writeVersion;
beginNode->entryPresent = true;
}
assert(writeVersion >= beginNode->entry.pointVersion);
beginNode->entry.pointVersion = writeVersion;
auto *endNode = *insert(useAsRoot, end, writeVersion, tls, impl);
const bool insertedEnd = !endNode->entryPresent;
addKey(endNode);
endNode->entryPresent = true;
if (insertedEnd) {
if (!endNode->entryPresent) {
++tls->accum.entries_inserted;
auto *p = nextLogical(endNode);
endNode->entry.pointVersion =
p == nullptr ? tls->zero : std::max(p->entry.rangeVersion, tls->zero);
if (beginIsPrefix) {
// beginNode may have been invalidated when inserting end
beginNode = *useAsRoot;
assert(beginNode->entryPresent);
}
endNode->entryPresent = true;
}
endNode->entry.rangeVersion = writeVersion;
@@ -2960,6 +3241,59 @@ void addWriteRange(Node *&root, std::span<const uint8_t> begin,
// Inserting end trashed endNode's maxVersion. Fix that
fixupMaxVersion(endNode, impl, tls);
} else /*!beginIsPrefix*/ {
auto *beginNode = *insert(useAsRoot, begin, writeVersion, tls, impl);
addKey(beginNode);
if (!beginNode->entryPresent) {
++tls->accum.entries_inserted;
auto *p = nextLogical(beginNode);
beginNode->entry.rangeVersion =
p == nullptr ? tls->zero : std::max(p->entry.rangeVersion, tls->zero);
beginNode->entryPresent = true;
}
beginNode->entry.pointVersion = writeVersion;
auto *endNode = *insert(useAsRoot, end, writeVersion, tls, impl);
addKey(endNode);
if (!endNode->entryPresent) {
++tls->accum.entries_inserted;
auto *p = nextLogical(endNode);
endNode->entry.pointVersion =
p == nullptr ? tls->zero : std::max(p->entry.rangeVersion, tls->zero);
endNode->entryPresent = true;
}
endNode->entry.rangeVersion = writeVersion;
eraseBetween(*useAsRoot, begin[0] + 1, end[0], tls);
// // Erase along left spine
// for (auto [n, key] = std::make_tuple(useAsRoot, begin);;) {
// auto before = key;
// auto **child = &getOrCreateChild(*n, key, writeVersion, tls);
// if (key.size() > 0) {
// eraseBetween(*child, int(key[0]) + 1, 256, tls);
// if ((*child)->numChildren == 1 && !(*child)->entryPresent) {
// Node *dummy = nullptr;
// mergeWithChild(*child, tls, impl, dummy,
// static_cast<Node3 *>(*child));
// key = before;
// continue;
// }
// } else {
// eraseBetween(*child, 0, 256, tls);
// beginNode = *child;
// break;
// }
// n = child;
// }
for (beginNode = nextLogical(beginNode); beginNode != endNode;
beginNode = erase(beginNode, tls, impl, /*logical*/ true, endNode)) {
}
// Inserting end trashed endNode's maxVersion. Fix that
fixupMaxVersion(endNode, impl, tls);
}
}
Node *firstGeqPhysical(Node *n, const std::span<const uint8_t> key) {
@@ -3070,7 +3404,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
if (oldestExtantVersion < writeVersion - kMaxCorrectVersionWindow)
[[unlikely]] {
if (writeVersion > newestVersionFullPrecision + kNominalVersionWindow) {
destroyTree(root);
eraseTree(root, &tls);
init(writeVersion - kNominalVersionWindow);
}
@@ -3133,17 +3467,17 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
int64_t set_oldest_iterations_accum = 0;
for (; fuel > 0 && n != nullptr; ++set_oldest_iterations_accum) {
rezero(n, oldestVersion);
// The "make sure gc keeps up with writes" calculations assume that we're
// scanning key by key, not node by node. Make sure we only spend fuel
// when there's a logical entry.
// The "make sure gc keeps up with writes" calculations assume that
// we're scanning key by key, not node by node. Make sure we only spend
// fuel when there's a logical entry.
fuel -= n->entryPresent;
if (n->entryPresent && std::max(n->entry.pointVersion,
n->entry.rangeVersion) <= oldestVersion) {
// Any transaction n would have prevented from committing is
// going to fail with TooOld anyway.
// There's no way to insert a range such that range version of the right
// node is greater than the point version of the left node
// There's no way to insert a range such that range version of the
// right node is greater than the point version of the left node
assert(n->entry.rangeVersion <= oldestVersion);
Node *dummy = nullptr;
n = erase(n, &tls, this, /*logical*/ false, dummy);
@@ -3180,9 +3514,9 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
this->oldestVersion = oldestVersion;
InternalVersionT::zero = tls.zero = oldestVersion;
#ifdef NDEBUG
// This is here for performance reasons, since we want to amortize the cost
// of storing the search path as a string. In tests, we want to exercise the
// rest of the code often.
// This is here for performance reasons, since we want to amortize the
// cost of storing the search path as a string. In tests, we want to
// exercise the rest of the code often.
if (keyUpdates < 100) {
return;
}
@@ -3238,7 +3572,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
initMetrics();
}
~Impl() {
destroyTree(root);
eraseTree(root, &tls);
safe_free(metrics, metricsCount * sizeof(metrics[0]));
}
@@ -3306,8 +3640,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
"Total number of checks where the result is \"too old\"");
COUNTER(check_bytes_total, "Total number of key bytes checked");
COUNTER(point_writes_total, "Total number of point writes");
COUNTER(range_writes_total,
"Total number of range writes (includes prefix writes)");
COUNTER(range_writes_total, "Total number of range writes");
GAUGE(memory_bytes, "Total number of bytes in use");
COUNTER(nodes_allocated_total,
"The total number of physical tree nodes allocated");
@@ -3321,16 +3654,14 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
"The total number of entries inserted in the tree");
COUNTER(entries_erased_total,
"The total number of entries erased from the tree");
COUNTER(
gc_iterations_total,
"The total number of iterations of the main loop for garbage collection");
COUNTER(gc_iterations_total, "The total number of iterations of the main "
"loop for garbage collection");
COUNTER(write_bytes_total, "Total number of key bytes in calls to addWrites");
GAUGE(oldest_version,
"The lowest version that doesn't result in \"TooOld\" for checks");
GAUGE(newest_version, "The version of the most recent call to addWrites");
GAUGE(
oldest_extant_version,
"A lower bound on the lowest version associated with an existing entry");
GAUGE(oldest_extant_version, "A lower bound on the lowest version "
"associated with an existing entry");
// ==================== END METRICS DEFINITIONS ====================
#undef GAUGE
#undef COUNTER
@@ -3416,6 +3747,7 @@ InternalVersionT exchangeMaxVersion(Node *n, InternalVersionT newMax) {
}
void setMaxVersion(Node *n, ConflictSet::Impl *impl, InternalVersionT newMax) {
assert(newMax >= InternalVersionT::zero);
int index = n->parentsIndex;
n = n->parent;
if (n == nullptr) {
@@ -3705,8 +4037,8 @@ std::string strinc(std::string_view str, bool &ok) {
if ((uint8_t &)(str[index]) != 255)
break;
// Must not be called with a string that consists only of zero or more '\xff'
// bytes.
// Must not be called with a string that consists only of zero or more
// '\xff' bytes.
if (index < 0) {
ok = false;
return {};
@@ -3926,7 +4258,7 @@ checkMaxVersion(Node *root, Node *node, InternalVersionT oldestVersion,
bool success = true;
if (node->partialKeyLen > 0) {
fprintf(stderr, "Root cannot have a partial key");
fprintf(stderr, "Root cannot have a partial key\n");
success = false;
}
checkParentPointers(node, success);

View File

@@ -273,6 +273,16 @@ template <class T> struct Vector {
size_ += slice.size();
}
// Grow the vector by `appendSize` elements and hand back a view of the new
// tail. The returned elements are uninitialized: the caller is required to
// write every one of them before reading.
std::span<T> unsafePrepareAppend(int appendSize) {
  const int newSize = size_ + appendSize;
  if (newSize > capacity) {
    // At least double to keep appends amortized O(1).
    grow(std::max<int>(newSize, capacity * 2));
  }
  std::span<T> slice{t + size_, static_cast<size_t>(appendSize)};
  size_ = newSize;
  return slice;
}
void push_back(const T &t) { append(std::span<const T>(&t, 1)); }
T *begin() { return t; }

View File

@@ -164,6 +164,63 @@ double toSeconds(timeval t) {
return double(t.tv_sec) + double(t.tv_usec) * 1e-6;
}
#include <linux/perf_event.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#ifdef __linux__
struct PerfCounter {
  // Opens a hardware counter (e.g. PERF_COUNT_HW_INSTRUCTIONS) for the
  // calling process on any CPU (pid=0, cpu=-1). Counts user space only and
  // is inherited by threads created after construction. Exits the process
  // if the counter cannot be opened.
  explicit PerfCounter(int event) {
    struct perf_event_attr pe;
    memset(&pe, 0, sizeof(pe));
    pe.type = PERF_TYPE_HARDWARE;
    pe.size = sizeof(pe);
    pe.config = event;
    pe.inherit = 1;        // include child threads in the count
    pe.exclude_kernel = 1; // measure user space only
    pe.exclude_hv = 1;
    fd = perf_event_open(&pe, 0, -1, -1, 0);
    if (fd == -1) {
      fprintf(stderr, "Error opening leader %llx\n", pe.config);
      exit(EXIT_FAILURE);
    }
  }
  // Owns `fd`; copying would lead to a double close in ~PerfCounter, so
  // forbid it (Rule of Five).
  PerfCounter(const PerfCounter &) = delete;
  PerfCounter &operator=(const PerfCounter &) = delete;
  // Returns the cumulative count since construction.
  int64_t total() {
    int64_t count;
    if (read(fd, &count, sizeof(count)) != sizeof(count)) {
      perror("read instructions from perf");
      abort();
    }
    return count;
  }
  ~PerfCounter() { close(fd); }

private:
  int fd;
  // Thin wrapper: glibc exposes no perf_event_open symbol, so invoke the
  // raw syscall directly.
  static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid,
                              int cpu, int group_fd, unsigned long flags) {
    int ret;
    ret = syscall(SYS_perf_event_open, hw_event, pid, cpu, group_fd, flags);
    return ret;
  }
};
#else
// Stub used on non-Linux platforms where perf_event_open is unavailable:
// accepts an event id for interface parity and always reports zero.
struct PerfCounter {
  // Bug fix: the constructor was misspelled `PerPerfCounter`, which fails
  // to compile (a member with no return type that doesn't match the class
  // name). It must be named after the struct.
  explicit PerfCounter(int) {}
  int64_t total() { return 0; }
};
#endif
int main(int argc, char **argv) {
if (argc != 3) {
goto fail;
@@ -176,6 +233,8 @@ int main(int argc, char **argv) {
int metricsCount;
cs.getMetricsV1(&metrics, &metricsCount);
PerfCounter instructions{PERF_COUNT_HW_INSTRUCTIONS};
PerfCounter cycles{PERF_COUNT_HW_CPU_CYCLES};
auto w = std::thread{workload, &cs};
for (;;) {
@@ -203,6 +262,16 @@ int main(int argc, char **argv) {
"transactions_total ";
body += std::to_string(transactions.load(std::memory_order_relaxed));
body += "\n";
body += "# HELP instructions_total Total number of instructions\n"
"# TYPE instructions_total counter\n"
"instructions_total ";
body += std::to_string(instructions.total());
body += "\n";
body += "# HELP cycles_total Total number of cycles\n"
"# TYPE cycles_total counter\n"
"cycles_total ";
body += std::to_string(cycles.total());
body += "\n";
for (int i = 0; i < metricsCount; ++i) {
body += "# HELP ";

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Some files were not shown because too many files have changed in this diff Show More