@@ -338,11 +338,12 @@ struct Node3 : Node {
constexpr static auto kMaxNodes = 3;
constexpr static auto kType = Type_Node3;
TaggedNodePointer children[kMaxNodes];
InternalVersionT childMaxVersion[kMaxNodes];
// Sorted
uint8_t index[kMaxNodes];
TaggedNodePointer children[kMaxNodes];
InternalVersionT childMaxVersion[kMaxNodes];
uint8_t *partialKey() {
assert(!releaseDeferred);
return (uint8_t *)(this + 1);
@@ -357,11 +358,12 @@ struct Node16 : Node {
constexpr static auto kType = Type_Node16;
constexpr static auto kMaxNodes = 16;
TaggedNodePointer children[kMaxNodes];
InternalVersionT childMaxVersion[kMaxNodes];
// Sorted
uint8_t index[kMaxNodes];
TaggedNodePointer children[kMaxNodes];
InternalVersionT childMaxVersion[kMaxNodes];
uint8_t *partialKey() {
assert(!releaseDeferred);
return (uint8_t *)(this + 1);
@@ -440,7 +442,10 @@ inline void Node3::copyChildrenAndKeyFrom(const Node0 &other) {
inline void Node3::copyChildrenAndKeyFrom(const Node3 &other) {
memcpy((char *)this + kNodeCopyBegin, (char *)&other + kNodeCopyBegin,
kNodeCopySize);
memcpy(children, other.children, sizeof(*this) - sizeof(Node));
memcpy(index, other.index, kMaxNodes);
memcpy(children, other.children, kMaxNodes * sizeof(children[0])); // NOLINT
memcpy(childMaxVersion, other.childMaxVersion,
kMaxNodes * sizeof(childMaxVersion[0]));
memcpy(partialKey(), &other + 1, partialKeyLen);
for (int i = 0; i < numChildren; ++i) {
assert(children[i]->parent == &other);
@@ -675,18 +680,34 @@ static_assert(kBytesPerKey - sizeof(Node0) >= kMinNodeSurplus);
// Which should give us the budget to pay for the key bytes. (children +
// entryPresent) is a lower bound on how many keys these bytes are a prefix of
// For now it's pretty much just a wrapper around malloc/free with some
// application-specific initialization. Maintaining a free list doesn't work
// that well since partial capacities mean the nodes have different sizes. If we
// come up with something better later we can implement it here.
constexpr int getMaxCapacity(int numChildren, int entryPresent,
int partialKeyLen) {
return (numChildren + entryPresent) * (partialKeyLen + 1);
}
constexpr int getMaxCapacity(Node *self) {
return getMaxCapacity(self->numChildren, self->entryPresent,
self->partialKeyLen);
}
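// Illustrative arithmetic (example numbers, not from the change itself): a
// node with two children, an entry present, and a 5-byte partial key gets a
// budget of (2 + 1) * (5 + 1) = 18 bytes, so its allocated partial-key
// capacity may legitimately sit anywhere up to 18 before it is considered
// oversized and shrunk.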
constexpr int64_t kMaxFreeListBytes = 1 << 20;
// Maintains a free list up to kMaxFreeListBytes. If the top element of the list
// doesn't meet the capacity constraints, it's freed and a new node is allocated
// with the minimum capacity. The hope is that "unfit" nodes don't get stuck in
// the free list.
//
// TODO valgrind annotations
template <class T> struct NodeAllocator {
static_assert(sizeof(T) >= sizeof(void *));
static_assert(std::derived_from<T, Node>);
static_assert(std::is_trivial_v<T>);
T *allocate(int partialKeyCapacity) {
T *result = allocate_helper(partialKeyCapacity);
T *allocate(int minCapacity, int maxCapacity) {
assert(minCapacity <= maxCapacity);
assert(freeListSize >= 0);
assert(freeListSize <= kMaxFreeListBytes);
T *result = allocate_helper(minCapacity, maxCapacity);
result->endOfRange = false;
result->releaseDeferred = false;
if constexpr (!std::is_same_v<T, Node0>) {
@@ -706,8 +727,41 @@ template <class T> struct NodeAllocator {
}
void release(T *p) {
removeNode(p);
return safe_free(p, sizeof(T) + p->partialKeyCapacity);
if (freeListSize + sizeof(T) + p->partialKeyCapacity > kMaxFreeListBytes) {
removeNode(p);
return safe_free(p, sizeof(T) + p->partialKeyCapacity);
}
p->parent = freeList;
freeList = p;
freeListSize += sizeof(T) + p->partialKeyCapacity;
}
void deferRelease(T *p, Node *forwardTo) {
p->releaseDeferred = true;
p->forwardTo = forwardTo;
if (freeListSize + sizeof(T) + p->partialKeyCapacity > kMaxFreeListBytes) {
p->parent = deferredListOverflow;
deferredListOverflow = p;
} else {
if (deferredList == nullptr) {
deferredListFront = p;
}
p->parent = deferredList;
deferredList = p;
freeListSize += sizeof(T) + p->partialKeyCapacity;
}
}
void releaseDeferred() {
if (deferredList != nullptr) {
deferredListFront->parent = freeList;
freeList = std::exchange(deferredList, nullptr);
}
for (T *n = std::exchange(deferredListOverflow, nullptr); n != nullptr;) {
auto *tmp = n;
n = (T *)n->parent;
release(tmp);
}
}
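// How the pieces above appear to fit together: deferRelease marks a node and
// records forwardTo so code still holding the old pointer can chase it to the
// replacement node, while releaseDeferred later splices the deferred list onto
// the free list (or frees overflow nodes outright); only at that point does
// the memory become eligible for reuse.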
NodeAllocator() = default;
@@ -717,13 +771,46 @@ template <class T> struct NodeAllocator {
NodeAllocator(NodeAllocator &&) = delete;
NodeAllocator &operator=(NodeAllocator &&) = delete;
~NodeAllocator() {}
~NodeAllocator() {
assert(deferredList == nullptr);
assert(deferredListOverflow == nullptr);
for (T *iter = freeList; iter != nullptr;) {
auto *tmp = iter;
iter = (T *)iter->parent;
removeNode(tmp);
safe_free(tmp, sizeof(T) + tmp->partialKeyCapacity);
}
}
private:
T *allocate_helper(int partialKeyCapacity) {
auto *result = (T *)safe_malloc(sizeof(T) + partialKeyCapacity);
int64_t freeListSize = 0;
T *freeList = nullptr;
T *deferredList = nullptr;
// Used to concatenate deferredList to freeList
T *deferredListFront;
T *deferredListOverflow = nullptr;
T *allocate_helper(int minCapacity, int maxCapacity) {
if (freeList != nullptr) {
freeListSize -= sizeof(T) + freeList->partialKeyCapacity;
assume(freeList->partialKeyCapacity >= 0);
assume(minCapacity >= 0);
assume(minCapacity <= maxCapacity);
if (freeList->partialKeyCapacity >= minCapacity &&
freeList->partialKeyCapacity <= maxCapacity) {
auto *result = freeList;
freeList = (T *)freeList->parent;
return result;
} else {
auto *p = freeList;
freeList = (T *)p->parent;
removeNode(p);
safe_free(p, sizeof(T) + p->partialKeyCapacity);
}
}
auto *result = (T *)safe_malloc(sizeof(T) + minCapacity);
result->type = T::kType;
result->partialKeyCapacity = partialKeyCapacity;
result->partialKeyCapacity = minCapacity;
addNode(result);
return result;
}
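// Example of the two-argument contract (mirroring the call sites later in this
// change): growing a Node3 into a Node16 requests
// allocate(self->partialKeyLen, getMaxCapacity(4, self->entryPresent,
// self->partialKeyLen)), so a recycled node is reused only when its existing
// capacity already lies in that [min, max] window; anything else is freed and
// replaced with a fresh minCapacity allocation, which keeps "unfit" nodes from
// lingering in the free list.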
@@ -791,18 +878,19 @@ struct WriteContext {
WriteContext() { memset(&accum, 0, sizeof(accum)); }
template <class T> T *allocate(int c) {
template <class T> T *allocate(int minCapacity, int maxCapacity) {
static_assert(!std::is_same_v<T, Node>);
++accum.nodes_allocated;
if constexpr (std::is_same_v<T, Node0>) {
return node0.allocate(c);
return node0.allocate(minCapacity, maxCapacity);
} else if constexpr (std::is_same_v<T, Node3>) {
return node3.allocate(c);
return node3.allocate(minCapacity, maxCapacity);
} else if constexpr (std::is_same_v<T, Node16>) {
return node16.allocate(c);
return node16.allocate(minCapacity, maxCapacity);
} else if constexpr (std::is_same_v<T, Node48>) {
return node48.allocate(c);
return node48.allocate(minCapacity, maxCapacity);
} else if constexpr (std::is_same_v<T, Node256>) {
return node256.allocate(c);
return node256.allocate(minCapacity, maxCapacity);
}
}
template <class T> void release(T *c) {
@@ -822,44 +910,32 @@ struct WriteContext {
}
// Place in a list to be released in the next call to releaseDeferred.
void deferRelease(Node *n, Node *forwardTo) {
n->releaseDeferred = true;
n->forwardTo = forwardTo;
n->parent = deferredList;
deferredList = n;
template <class T> void deferRelease(T *n, Node *forwardTo) {
static_assert(!std::is_same_v<T, Node>);
if constexpr (std::is_same_v<T, Node0>) {
return node0.deferRelease(n, forwardTo);
} else if constexpr (std::is_same_v<T, Node3>) {
return node3.deferRelease(n, forwardTo);
} else if constexpr (std::is_same_v<T, Node16>) {
return node16.deferRelease(n, forwardTo);
} else if constexpr (std::is_same_v<T, Node48>) {
return node48.deferRelease(n, forwardTo);
} else if constexpr (std::is_same_v<T, Node256>) {
return node256.deferRelease(n, forwardTo);
}
}
// Release all nodes passed to deferRelease since the last call to
// releaseDeferred.
void releaseDeferred() {
for (Node *n = std::exchange(deferredList, nullptr); n != nullptr;) {
auto *tmp = n;
n = n->parent;
switch (tmp->getType()) {
case Type_Node0:
release(static_cast<Node0 *>(tmp));
break;
case Type_Node3:
release(static_cast<Node3 *>(tmp));
break;
case Type_Node16:
release(static_cast<Node16 *>(tmp));
break;
case Type_Node48:
release(static_cast<Node48 *>(tmp));
break;
case Type_Node256:
release(static_cast<Node256 *>(tmp));
break;
default: // GCOVR_EXCL_LINE
__builtin_unreachable(); // GCOVR_EXCL_LINE
}
}
node0.releaseDeferred();
node3.releaseDeferred();
node16.releaseDeferred();
node48.releaseDeferred();
node256.releaseDeferred();
}
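// Presumably the point of delegating to per-type allocators here is that each
// NodeAllocator<T> recycles its deferred nodes onto its own free list, so the
// next allocation of the same node type can reuse them without the type
// dispatch the old switch-based releaseDeferred needed.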
private:
Node *deferredList = nullptr;
NodeAllocator<Node0> node0;
NodeAllocator<Node3> node3;
NodeAllocator<Node16> node16;
@@ -1396,11 +1472,13 @@ TaggedNodePointer getFirstChildExists(Node *self) {
}
// self must not be the root
void maybeDecreaseCapacity(Node *&self, WriteContext *writeContext);
void maybeDecreaseCapacity(Node *&self, WriteContext *writeContext,
ConflictSet::Impl *impl);
void consumePartialKeyFull(TaggedNodePointer &self, TrivialSpan &key,
InternalVersionT writeVersion,
WriteContext *writeContext) {
WriteContext *writeContext,
ConflictSet::Impl *impl) {
// Handle an existing partial key
int commonLen = std::min<int>(self->partialKeyLen, key.size());
int partialKeyIndex =
@@ -1412,7 +1490,8 @@ void consumePartialKeyFull(TaggedNodePointer &self, TrivialSpan &key,
InternalVersionT oldMaxVersion = exchangeMaxVersion(old, writeVersion);
// *self will have one child (old)
auto *newSelf = writeContext->allocate<Node3>(partialKeyIndex);
auto *newSelf = writeContext->allocate<Node3>(
partialKeyIndex, getMaxCapacity(1, 0, partialKeyIndex));
newSelf->parent = old->parent;
newSelf->parentsIndex = old->parentsIndex;
@@ -1435,7 +1514,7 @@ void consumePartialKeyFull(TaggedNodePointer &self, TrivialSpan &key,
old->partialKeyLen -= partialKeyIndex + 1;
// Maintain memory capacity invariant
maybeDecreaseCapacity(old, writeContext);
maybeDecreaseCapacity(old, writeContext, impl);
}
key = key.subspan(partialKeyIndex, key.size() - partialKeyIndex);
}
@@ -1444,9 +1523,10 @@ void consumePartialKeyFull(TaggedNodePointer &self, TrivialSpan &key,
// `key` such that `self` is along the search path of `key`
inline __attribute__((always_inline)) void
consumePartialKey(TaggedNodePointer &self, TrivialSpan &key,
InternalVersionT writeVersion, WriteContext *writeContext) {
InternalVersionT writeVersion, WriteContext *writeContext,
ConflictSet::Impl *impl) {
if (self->partialKeyLen > 0) {
consumePartialKeyFull(self, key, writeVersion, writeContext);
consumePartialKeyFull(self, key, writeVersion, writeContext, impl);
}
}
@@ -1456,7 +1536,8 @@ consumePartialKey(TaggedNodePointer &self, TrivialSpan &key,
// `maxVersion` for result.
TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, TrivialSpan &key,
InternalVersionT newMaxVersion,
WriteContext *writeContext) {
WriteContext *writeContext,
ConflictSet::Impl *impl) {
int index = key.front();
key = key.subspan(1, key.size() - 1);
@@ -1469,7 +1550,8 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, TrivialSpan &key,
auto *self3 = static_cast<Node3 *>(self);
int i = getNodeIndex(self3, index);
if (i >= 0) {
consumePartialKey(self3->children[i], key, newMaxVersion, writeContext);
consumePartialKey(self3->children[i], key, newMaxVersion, writeContext,
impl);
self3->childMaxVersion[i] = newMaxVersion;
return self3->children[i];
}
@@ -1478,7 +1560,8 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, TrivialSpan &key,
auto *self16 = static_cast<Node16 *>(self);
int i = getNodeIndex(self16, index);
if (i >= 0) {
consumePartialKey(self16->children[i], key, newMaxVersion, writeContext);
consumePartialKey(self16->children[i], key, newMaxVersion, writeContext,
impl);
self16->childMaxVersion[i] = newMaxVersion;
return self16->children[i];
}
@@ -1488,7 +1571,7 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, TrivialSpan &key,
int secondIndex = self48->index[index];
if (secondIndex >= 0) {
consumePartialKey(self48->children[secondIndex], key, newMaxVersion,
writeContext);
writeContext, impl);
self48->childMaxVersion[secondIndex] = newMaxVersion;
self48->maxOfMax[secondIndex >> Node48::kMaxOfMaxShift] =
std::max(self48->maxOfMax[secondIndex >> Node48::kMaxOfMaxShift],
@@ -1499,7 +1582,7 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, TrivialSpan &key,
case Type_Node256: {
auto *self256 = static_cast<Node256 *>(self);
if (auto &result = self256->children[index]; result != nullptr) {
consumePartialKey(result, key, newMaxVersion, writeContext);
consumePartialKey(result, key, newMaxVersion, writeContext, impl);
self256->childMaxVersion[index] = newMaxVersion;
self256->maxOfMax[index >> Node256::kMaxOfMaxShift] = std::max(
self256->maxOfMax[index >> Node256::kMaxOfMaxShift], newMaxVersion);
@@ -1510,9 +1593,10 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, TrivialSpan &key,
__builtin_unreachable(); // GCOVR_EXCL_LINE
}
auto *newChild = writeContext->allocate<Node0>(key.size());
auto *newChild = writeContext->allocate<Node0>(
key.size(), getMaxCapacity(0, 1, key.size()));
newChild->numChildren = 0;
newChild->entryPresent = false;
newChild->entryPresent = false; // Will be set to true by the caller
newChild->partialKeyLen = key.size();
newChild->parentsIndex = index;
memcpy(newChild->partialKey(), key.data(), key.size());
@@ -1522,7 +1606,8 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, TrivialSpan &key,
case Type_Node0: {
auto *self0 = static_cast<Node0 *>(self);
auto *newSelf = writeContext->allocate<Node3>(self->partialKeyLen);
auto *newSelf = writeContext->allocate<Node3>(
self->partialKeyLen, getMaxCapacity(1, 1, self->partialKeyLen));
newSelf->copyChildrenAndKeyFrom(*self0);
writeContext->deferRelease(self0, newSelf);
self = newSelf;
@@ -1532,7 +1617,9 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, TrivialSpan &key,
case Type_Node3: {
if (self->numChildren == Node3::kMaxNodes) {
auto *self3 = static_cast<Node3 *>(self);
auto *newSelf = writeContext->allocate<Node16>(self->partialKeyLen);
auto *newSelf = writeContext->allocate<Node16>(
self->partialKeyLen,
getMaxCapacity(4, self->entryPresent, self->partialKeyLen));
newSelf->copyChildrenAndKeyFrom(*self3);
writeContext->deferRelease(self3, newSelf);
self = newSelf;
@@ -1561,7 +1648,9 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, TrivialSpan &key,
case Type_Node16: {
if (self->numChildren == Node16::kMaxNodes) {
auto *self16 = static_cast<Node16 *>(self);
auto *newSelf = writeContext->allocate<Node48>(self->partialKeyLen);
auto *newSelf = writeContext->allocate<Node48>(
self->partialKeyLen,
getMaxCapacity(17, self->entryPresent, self->partialKeyLen));
newSelf->copyChildrenAndKeyFrom(*self16);
writeContext->deferRelease(self16, newSelf);
self = newSelf;
@@ -1592,7 +1681,9 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, TrivialSpan &key,
if (self->numChildren == 48) {
auto *self48 = static_cast<Node48 *>(self);
auto *newSelf = writeContext->allocate<Node256>(self->partialKeyLen);
auto *newSelf = writeContext->allocate<Node256>(
self->partialKeyLen,
getMaxCapacity(49, self->entryPresent, self->partialKeyLen));
newSelf->copyChildrenAndKeyFrom(*self48);
writeContext->deferRelease(self48, newSelf);
self = newSelf;
@@ -1643,7 +1734,7 @@ Node *nextPhysical(Node *node) {
if (node == nullptr) {
return nullptr;
}
auto nextChild = getChildGeq(node, index + 1);
Node *nextChild = getChildGeq(node, index + 1);
if (nextChild != nullptr) {
return nextChild;
}
@@ -1662,7 +1753,7 @@ Node *nextLogical(Node *node) {
if (node == nullptr) {
return nullptr;
}
auto nextChild = getChildGeq(node, index + 1);
Node *nextChild = getChildGeq(node, index + 1);
if (nextChild != nullptr) {
node = nextChild;
goto downLeftSpine;
@@ -1674,46 +1765,47 @@ downLeftSpine:
return node;
}
void freeAndMakeCapacityAtLeast(Node *&self, int capacity,
WriteContext *writeContext) {
void freeAndMakeCapacityBetween(Node *&self, int minCapacity, int maxCapacity,
WriteContext *writeContext,
ConflictSet::Impl *impl) {
switch (self->getType()) {
case Type_Node0: {
auto *self0 = (Node0 *)self;
auto *newSelf = writeContext->allocate<Node0>(capacity);
auto *newSelf = writeContext->allocate<Node0>(minCapacity, maxCapacity);
newSelf->copyChildrenAndKeyFrom(*self0);
getInTree(self, nullptr) = newSelf;
getInTree(self, impl) = newSelf;
writeContext->deferRelease(self0, newSelf);
self = newSelf;
} break;
case Type_Node3: {
auto *self3 = (Node3 *)self;
auto *newSelf = writeContext->allocate<Node3>(capacity);
auto *newSelf = writeContext->allocate<Node3>(minCapacity, maxCapacity);
newSelf->copyChildrenAndKeyFrom(*self3);
getInTree(self, nullptr) = newSelf;
getInTree(self, impl) = newSelf;
writeContext->deferRelease(self3, newSelf);
self = newSelf;
} break;
case Type_Node16: {
auto *self16 = (Node16 *)self;
auto *newSelf = writeContext->allocate<Node16>(capacity);
auto *newSelf = writeContext->allocate<Node16>(minCapacity, maxCapacity);
newSelf->copyChildrenAndKeyFrom(*self16);
getInTree(self, nullptr) = newSelf;
getInTree(self, impl) = newSelf;
writeContext->deferRelease(self16, newSelf);
self = newSelf;
} break;
case Type_Node48: {
auto *self48 = (Node48 *)self;
auto *newSelf = writeContext->allocate<Node48>(capacity);
auto *newSelf = writeContext->allocate<Node48>(minCapacity, maxCapacity);
newSelf->copyChildrenAndKeyFrom(*self48);
getInTree(self, nullptr) = newSelf;
getInTree(self, impl) = newSelf;
writeContext->deferRelease(self48, newSelf);
self = newSelf;
} break;
case Type_Node256: {
auto *self256 = (Node256 *)self;
auto *newSelf = writeContext->allocate<Node256>(capacity);
auto *newSelf = writeContext->allocate<Node256>(minCapacity, maxCapacity);
newSelf->copyChildrenAndKeyFrom(*self256);
getInTree(self, nullptr) = newSelf;
getInTree(self, impl) = newSelf;
writeContext->deferRelease(self256, newSelf);
self = newSelf;
} break;
@@ -1723,7 +1815,8 @@ void freeAndMakeCapacityAtLeast(Node *&self, int capacity,
}
// Fix larger-than-desired capacities. self must not be the root
void maybeDecreaseCapacity(Node *&self, WriteContext *writeContext) {
void maybeDecreaseCapacity(Node *&self, WriteContext *writeContext,
ConflictSet::Impl *impl) {
const int maxCapacity =
(self->numChildren + int(self->entryPresent)) * (self->partialKeyLen + 1);
@@ -1735,7 +1828,8 @@ void maybeDecreaseCapacity(Node *&self, WriteContext *writeContext) {
if (self->getCapacity() <= maxCapacity) {
return;
}
freeAndMakeCapacityAtLeast(self, maxCapacity, writeContext);
freeAndMakeCapacityBetween(self, self->partialKeyLen, maxCapacity,
writeContext, impl);
}
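// Worked example (numbers assumed): a node left with one child, no entry, and
// a 3-byte partial key has maxCapacity (1 + 0) * (3 + 1) = 4; if it still owns
// a much larger buffer it is reallocated with a capacity somewhere in
// [partialKeyLen, maxCapacity] = [3, 4], and the old node goes through
// deferRelease so outstanding pointers can still be forwarded to the
// replacement.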
#if defined(HAS_AVX) && !defined(__SANITIZE_THREAD__)
@@ -1805,13 +1899,16 @@ void rezero(Node *n, InternalVersionT z) {
#endif
void mergeWithChild(TaggedNodePointer &self, WriteContext *writeContext,
Node3 *self3) {
Node3 *self3, ConflictSet::Impl *impl) {
assert(!self3->entryPresent);
Node *child = self3->children[0];
int minCapacity = self3->partialKeyLen + 1 + child->partialKeyLen;
const int minCapacity = self3->partialKeyLen + 1 + child->partialKeyLen;
const int maxCapacity =
getMaxCapacity(child->numChildren, child->entryPresent, minCapacity);
if (minCapacity > child->getCapacity()) {
freeAndMakeCapacityAtLeast(child, minCapacity, writeContext);
freeAndMakeCapacityBetween(child, minCapacity, maxCapacity, writeContext,
impl);
}
// Merge partial key with child
@@ -1850,20 +1947,23 @@ bool needsDownsize(Node *n) {
void downsize(Node3 *self, WriteContext *writeContext,
ConflictSet::Impl *impl) {
if (self->numChildren == 0) {
auto *newSelf = writeContext->allocate<Node0>(self->partialKeyLen);
auto *newSelf = writeContext->allocate<Node0>(
self->partialKeyLen, getMaxCapacity(0, 1, self->partialKeyLen));
newSelf->copyChildrenAndKeyFrom(*self);
getInTree(self, impl) = newSelf;
writeContext->deferRelease(self, newSelf);
} else {
assert(self->numChildren == 1 && !self->entryPresent);
mergeWithChild(getInTree(self, impl), writeContext, self);
mergeWithChild(getInTree(self, impl), writeContext, self, impl);
}
}
void downsize(Node16 *self, WriteContext *writeContext,
ConflictSet::Impl *impl) {
assert(self->numChildren + int(self->entryPresent) < kMinChildrenNode16);
auto *newSelf = writeContext->allocate<Node3>(self->partialKeyLen);
auto *newSelf = writeContext->allocate<Node3>(
self->partialKeyLen,
getMaxCapacity(kMinChildrenNode16 - 1, 0, self->partialKeyLen));
newSelf->copyChildrenAndKeyFrom(*self);
getInTree(self, impl) = newSelf;
writeContext->deferRelease(self, newSelf);
@@ -1872,7 +1972,9 @@ void downsize(Node16 *self, WriteContext *writeContext,
void downsize(Node48 *self, WriteContext *writeContext,
ConflictSet::Impl *impl) {
assert(self->numChildren + int(self->entryPresent) < kMinChildrenNode48);
auto *newSelf = writeContext->allocate<Node16>(self->partialKeyLen);
auto *newSelf = writeContext->allocate<Node16>(
self->partialKeyLen,
getMaxCapacity(kMinChildrenNode48 - 1, 0, self->partialKeyLen));
newSelf->copyChildrenAndKeyFrom(*self);
getInTree(self, impl) = newSelf;
writeContext->deferRelease(self, newSelf);
@@ -1882,7 +1984,9 @@ void downsize(Node256 *self, WriteContext *writeContext,
ConflictSet::Impl *impl) {
assert(self->numChildren + int(self->entryPresent) < kMinChildrenNode256);
auto *self256 = (Node256 *)self;
auto *newSelf = writeContext->allocate<Node48>(self->partialKeyLen);
auto *newSelf = writeContext->allocate<Node48>(
self->partialKeyLen,
getMaxCapacity(kMinChildrenNode256 - 1, 0, self->partialKeyLen));
newSelf->copyChildrenAndKeyFrom(*self256);
getInTree(self, impl) = newSelf;
writeContext->deferRelease(self256, newSelf);
@@ -1939,7 +2043,7 @@ Node *erase(Node *self, WriteContext *writeContext, ConflictSet::Impl *impl,
while (self->releaseDeferred) {
self = self->forwardTo;
}
maybeDecreaseCapacity(self, writeContext);
maybeDecreaseCapacity(self, writeContext, impl);
if (result != nullptr) {
while (result->releaseDeferred) {
result = result->forwardTo;
@@ -2030,7 +2134,7 @@ Node *erase(Node *self, WriteContext *writeContext, ConflictSet::Impl *impl,
while (parent->releaseDeferred) {
parent = parent->forwardTo;
}
maybeDecreaseCapacity(parent, writeContext);
maybeDecreaseCapacity(parent, writeContext, impl);
if (result != nullptr) {
while (result->releaseDeferred) {
@@ -2735,13 +2839,12 @@ checkMaxBetweenExclusiveImpl<true>(Node256 *n, int begin, int end,
// of the result will have `maxVersion` set to `writeVersion` as a
// postcondition. Nodes along the search path may be invalidated. Callers must
// ensure that the max version of the self argument is updated.
[[nodiscard]] TaggedNodePointer *insert(TaggedNodePointer *self,
TrivialSpan key,
InternalVersionT writeVersion,
WriteContext *writeContext) {
[[nodiscard]] TaggedNodePointer *
insert(TaggedNodePointer *self, TrivialSpan key, InternalVersionT writeVersion,
WriteContext *writeContext, ConflictSet::Impl *impl) {
for (; key.size() != 0; ++writeContext->accum.insert_iterations) {
self = &getOrCreateChild(*self, key, writeVersion, writeContext);
self = &getOrCreateChild(*self, key, writeVersion, writeContext, impl);
}
return self;
}
@@ -2799,9 +2902,10 @@ void eraseTree(Node *root, WriteContext *writeContext) {
}
void addPointWrite(TaggedNodePointer &root, TrivialSpan key,
InternalVersionT writeVersion, WriteContext *writeContext) {
InternalVersionT writeVersion, WriteContext *writeContext,
ConflictSet::Impl *impl) {
++writeContext->accum.point_writes;
auto n = *insert(&root, key, writeVersion, writeContext);
auto n = *insert(&root, key, writeVersion, writeContext, impl);
if (!n->entryPresent) {
++writeContext->accum.entries_inserted;
auto *p = nextLogical(n);
@@ -2935,8 +3039,8 @@ AddedWriteRange addWriteRange(Node *beginRoot, TrivialSpan begin, Node *endRoot,
++writeContext->accum.range_writes;
Node *beginNode =
*insert(&getInTree(beginRoot, impl), begin, writeVersion, writeContext);
Node *beginNode = *insert(&getInTree(beginRoot, impl), begin, writeVersion,
writeContext, impl);
addKey(beginNode);
if (!beginNode->entryPresent) {
++writeContext->accum.entries_inserted;
@@ -2952,7 +3056,7 @@ AddedWriteRange addWriteRange(Node *beginRoot, TrivialSpan begin, Node *endRoot,
beginNode->entry.pointVersion = writeVersion;
Node *endNode =
*insert(&getInTree(endRoot, impl), end, writeVersion, writeContext);
*insert(&getInTree(endRoot, impl), end, writeVersion, writeContext, impl);
addKey(endNode);
if (!endNode->entryPresent) {
@@ -2998,10 +3102,10 @@ void addWriteRange(TaggedNodePointer &root, TrivialSpan begin, TrivialSpan end,
std::min(begin.size(), end.size()));
if (lcp == begin.size() && end.size() == begin.size() + 1 &&
end.back() == 0) {
return addPointWrite(root, begin, writeVersion, writeContext);
return addPointWrite(root, begin, writeVersion, writeContext, impl);
}
auto useAsRoot =
insert(&root, begin.subspan(0, lcp), writeVersion, writeContext);
insert(&root, begin.subspan(0, lcp), writeVersion, writeContext, impl);
auto [beginNode, endNode] = addWriteRange(
*useAsRoot, begin.subspan(lcp, begin.size() - lcp), *useAsRoot,
@@ -3079,6 +3183,12 @@ Node *firstGeqPhysical(Node *n, const TrivialSpan key) {
#define PRESERVE_NONE
#endif
#if __has_attribute(musttail) && __has_attribute(preserve_none)
constexpr bool kEnableInterleaved = true;
#else
constexpr bool kEnableInterleaved = false;
#endif
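// Reading of this guard (an inference, not stated elsewhere in the change):
// the interleaved check path chains PRESERVE_NONE continuations with musttail,
// so it is only compiled in when the toolchain supports both attributes;
// otherwise every batch of checks falls back to useSequential.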
namespace check {
typedef PRESERVE_NONE void (*Continuation)(struct Job *, struct Context *);
@@ -4865,51 +4975,50 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
check::Context context;
context.readContext.impl = this;
#if __has_attribute(musttail)
if (count == 1) {
useSequential(reads, result, count, context);
} else {
constexpr int kConcurrent = 16;
check::Job inProgress[kConcurrent];
context.count = count;
context.oldestVersionFullPrecision = oldestVersionFullPrecision;
context.root = root;
context.queries = reads;
context.results = result;
int64_t started = std::min(kConcurrent, count);
context.started = started;
for (int i = 0; i < started; i++) {
inProgress[i].init(reads + i, result + i, root,
oldestVersionFullPrecision);
}
for (int i = 0; i < started - 1; i++) {
inProgress[i].next = inProgress + i + 1;
}
for (int i = 1; i < started; i++) {
inProgress[i].prev = inProgress + i - 1;
}
inProgress[0].prev = inProgress + started - 1;
inProgress[started - 1].next = inProgress;
if constexpr (kEnableInterleaved) {
if (count == 1) {
useSequential(reads, result, count, context);
} else {
constexpr int kConcurrent = 16;
check::Job inProgress[kConcurrent];
context.count = count;
context.oldestVersionFullPrecision = oldestVersionFullPrecision;
context.root = root;
context.queries = reads;
context.results = result;
int64_t started = std::min(kConcurrent, count);
context.started = started;
for (int i = 0; i < started; i++) {
inProgress[i].init(reads + i, result + i, root,
oldestVersionFullPrecision);
}
for (int i = 0; i < started - 1; i++) {
inProgress[i].next = inProgress + i + 1;
}
for (int i = 1; i < started; i++) {
inProgress[i].prev = inProgress + i - 1;
}
inProgress[0].prev = inProgress + started - 1;
inProgress[started - 1].next = inProgress;
// Kick off the sequence of tail calls that finally returns once all jobs
// are done
inProgress->continuation(inProgress, &context);
// Kick off the sequence of tail calls that finally returns once all
// jobs are done
inProgress->continuation(inProgress, &context);
#ifndef NDEBUG
Arena arena;
auto *results2 = new (arena) Result[count];
check::Context context2;
context2.readContext.impl = this;
useSequential(reads, results2, count, context2);
assert(memcmp(result, results2, count) == 0);
assert(context.readContext == context2.readContext);
Arena arena;
auto *results2 = new (arena) Result[count];
check::Context context2;
context2.readContext.impl = this;
useSequential(reads, results2, count, context2);
assert(memcmp(result, results2, count) == 0);
assert(context.readContext == context2.readContext);
#endif
}
} else {
useSequential(reads, result, count, context);
}
#else
useSequential(reads, result, count, context);
#endif
for (int i = 0; i < count; ++i) {
assert(reads[i].readVersion >= 0);
assert(reads[i].readVersion <= newestVersionFullPrecision);
@@ -5021,8 +5130,8 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
}
if (context.results[i].endInsertionPoint == nullptr) {
addPointWrite(getInTree(context.results[i].insertionPoint, this),
context.results[i].remaining, writeVersion,
&writeContext);
context.results[i].remaining, writeVersion, &writeContext,
this);
} else {
if (firstRangeWrite == nullptr) {
firstRangeWrite = context.results + i;
@@ -5082,11 +5191,6 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
assert(allPointWrites || sorted);
#endif
#if __has_attribute(musttail)
constexpr bool kEnableInterleaved = true;
#else
constexpr bool kEnableInterleaved = false;
#endif
if (kEnableInterleaved && count > 1) {
interleavedWrites(writes, count, InternalVersionT(writeVersion));
} else {
@@ -5099,7 +5203,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
&writeContext, this);
} else {
addPointWrite(root, begin, InternalVersionT(writeVersion),
&writeContext);
&writeContext, this);
}
}
}
@@ -5297,7 +5401,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
keyUpdates = 10;
// Insert ""
root = writeContext.allocate<Node0>(0);
root = writeContext.allocate<Node0>(0, 0);
root->numChildren = 0;
root->parent = nullptr;
root->entryPresent = false;