diff --git a/ConflictSet.cpp b/ConflictSet.cpp index 7063d38..8c86aab 100644 --- a/ConflictSet.cpp +++ b/ConflictSet.cpp @@ -1322,7 +1322,8 @@ Node *getFirstChildExists(Node *self) { void consumePartialKeyFull(TaggedNodePointer &self, std::span &key, - InternalVersionT writeVersion, WriteContext *tls) { + InternalVersionT writeVersion, + WriteContext *writeContext) { // Handle an existing partial key int commonLen = std::min(self->partialKeyLen, key.size()); int partialKeyIndex = @@ -1334,7 +1335,7 @@ void consumePartialKeyFull(TaggedNodePointer &self, InternalVersionT oldMaxVersion = exchangeMaxVersion(old, writeVersion); // *self will have one child (old) - auto *newSelf = tls->allocate(partialKeyIndex); + auto *newSelf = writeContext->allocate(partialKeyIndex); newSelf->parent = old->parent; newSelf->parentsIndex = old->parentsIndex; @@ -1367,9 +1368,9 @@ void consumePartialKeyFull(TaggedNodePointer &self, // `key` such that `self` is along the search path of `key` inline __attribute__((always_inline)) void consumePartialKey(TaggedNodePointer &self, std::span &key, - InternalVersionT writeVersion, WriteContext *tls) { + InternalVersionT writeVersion, WriteContext *writeContext) { if (self->partialKeyLen > 0) { - consumePartialKeyFull(self, key, writeVersion, tls); + consumePartialKeyFull(self, key, writeVersion, writeContext); } } @@ -1380,7 +1381,7 @@ consumePartialKey(TaggedNodePointer &self, std::span &key, TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, std::span &key, InternalVersionT newMaxVersion, - WriteContext *tls) { + WriteContext *writeContext) { int index = key.front(); key = key.subspan(1, key.size() - 1); @@ -1393,7 +1394,7 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, auto *self3 = static_cast(self); int i = getNodeIndex(self3, index); if (i >= 0) { - consumePartialKey(self3->children[i], key, newMaxVersion, tls); + consumePartialKey(self3->children[i], key, newMaxVersion, writeContext); self3->childMaxVersion[i] = 
newMaxVersion; return self3->children[i]; } @@ -1402,7 +1403,7 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, auto *self16 = static_cast(self); int i = getNodeIndex(self16, index); if (i >= 0) { - consumePartialKey(self16->children[i], key, newMaxVersion, tls); + consumePartialKey(self16->children[i], key, newMaxVersion, writeContext); self16->childMaxVersion[i] = newMaxVersion; return self16->children[i]; } @@ -1411,7 +1412,8 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, auto *self48 = static_cast(self); int secondIndex = self48->index[index]; if (secondIndex >= 0) { - consumePartialKey(self48->children[secondIndex], key, newMaxVersion, tls); + consumePartialKey(self48->children[secondIndex], key, newMaxVersion, + writeContext); self48->childMaxVersion[secondIndex] = newMaxVersion; self48->maxOfMax[secondIndex >> Node48::kMaxOfMaxShift] = std::max(self48->maxOfMax[secondIndex >> Node48::kMaxOfMaxShift], @@ -1422,7 +1424,7 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, case Type_Node256: { auto *self256 = static_cast(self); if (auto &result = self256->children[index]; result != nullptr) { - consumePartialKey(result, key, newMaxVersion, tls); + consumePartialKey(result, key, newMaxVersion, writeContext); self256->childMaxVersion[index] = newMaxVersion; self256->maxOfMax[index >> Node256::kMaxOfMaxShift] = std::max( self256->maxOfMax[index >> Node256::kMaxOfMaxShift], newMaxVersion); @@ -1433,7 +1435,7 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, __builtin_unreachable(); // GCOVR_EXCL_LINE } - auto *newChild = tls->allocate(key.size()); + auto *newChild = writeContext->allocate(key.size()); newChild->numChildren = 0; newChild->entryPresent = false; newChild->partialKeyLen = key.size(); @@ -1445,9 +1447,9 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, case Type_Node0: { auto *self0 = static_cast(self); - auto *newSelf = tls->allocate(self->partialKeyLen); + auto *newSelf = 
writeContext->allocate(self->partialKeyLen); newSelf->copyChildrenAndKeyFrom(*self0); - tls->release(self0); + writeContext->release(self0); self = newSelf; goto insert3; @@ -1455,9 +1457,9 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, case Type_Node3: { if (self->numChildren == Node3::kMaxNodes) { auto *self3 = static_cast(self); - auto *newSelf = tls->allocate(self->partialKeyLen); + auto *newSelf = writeContext->allocate(self->partialKeyLen); newSelf->copyChildrenAndKeyFrom(*self3); - tls->release(self3); + writeContext->release(self3); self = newSelf; goto insert16; } @@ -1484,9 +1486,9 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, case Type_Node16: { if (self->numChildren == Node16::kMaxNodes) { auto *self16 = static_cast(self); - auto *newSelf = tls->allocate(self->partialKeyLen); + auto *newSelf = writeContext->allocate(self->partialKeyLen); newSelf->copyChildrenAndKeyFrom(*self16); - tls->release(self16); + writeContext->release(self16); self = newSelf; goto insert48; } @@ -1515,9 +1517,9 @@ TaggedNodePointer &getOrCreateChild(TaggedNodePointer &self, if (self->numChildren == 48) { auto *self48 = static_cast(self); - auto *newSelf = tls->allocate(self->partialKeyLen); + auto *newSelf = writeContext->allocate(self->partialKeyLen); newSelf->copyChildrenAndKeyFrom(*self48); - tls->release(self48); + writeContext->release(self48); self = newSelf; goto insert256; } @@ -1579,17 +1581,18 @@ Node *nextLogical(Node *node) { // Invalidates `self`, replacing it with a node of at least capacity. // Does not return nodes to freelists when kUseFreeList is false. 
-void freeAndMakeCapacityAtLeast(Node *&self, int capacity, WriteContext *tls, +void freeAndMakeCapacityAtLeast(Node *&self, int capacity, + WriteContext *writeContext, ConflictSet::Impl *impl, const bool kUseFreeList) { switch (self->getType()) { case Type_Node0: { auto *self0 = (Node0 *)self; - auto *newSelf = tls->allocate(capacity); + auto *newSelf = writeContext->allocate(capacity); newSelf->copyChildrenAndKeyFrom(*self0); getInTree(self, impl) = newSelf; if (kUseFreeList) { - tls->release(self0); + writeContext->release(self0); } else { removeNode(self0); safe_free(self0, self0->size()); @@ -1598,11 +1601,11 @@ void freeAndMakeCapacityAtLeast(Node *&self, int capacity, WriteContext *tls, } break; case Type_Node3: { auto *self3 = (Node3 *)self; - auto *newSelf = tls->allocate(capacity); + auto *newSelf = writeContext->allocate(capacity); newSelf->copyChildrenAndKeyFrom(*self3); getInTree(self, impl) = newSelf; if (kUseFreeList) { - tls->release(self3); + writeContext->release(self3); } else { removeNode(self3); safe_free(self3, self3->size()); @@ -1611,11 +1614,11 @@ void freeAndMakeCapacityAtLeast(Node *&self, int capacity, WriteContext *tls, } break; case Type_Node16: { auto *self16 = (Node16 *)self; - auto *newSelf = tls->allocate(capacity); + auto *newSelf = writeContext->allocate(capacity); newSelf->copyChildrenAndKeyFrom(*self16); getInTree(self, impl) = newSelf; if (kUseFreeList) { - tls->release(self16); + writeContext->release(self16); } else { removeNode(self16); safe_free(self16, self16->size()); @@ -1624,11 +1627,11 @@ void freeAndMakeCapacityAtLeast(Node *&self, int capacity, WriteContext *tls, } break; case Type_Node48: { auto *self48 = (Node48 *)self; - auto *newSelf = tls->allocate(capacity); + auto *newSelf = writeContext->allocate(capacity); newSelf->copyChildrenAndKeyFrom(*self48); getInTree(self, impl) = newSelf; if (kUseFreeList) { - tls->release(self48); + writeContext->release(self48); } else { removeNode(self48); safe_free(self48, 
self48->size()); @@ -1637,11 +1640,11 @@ void freeAndMakeCapacityAtLeast(Node *&self, int capacity, WriteContext *tls, } break; case Type_Node256: { auto *self256 = (Node256 *)self; - auto *newSelf = tls->allocate(capacity); + auto *newSelf = writeContext->allocate(capacity); newSelf->copyChildrenAndKeyFrom(*self256); getInTree(self, impl) = newSelf; if (kUseFreeList) { - tls->release(self256); + writeContext->release(self256); } else { removeNode(self256); safe_free(self256, self256->size()); @@ -1656,7 +1659,7 @@ void freeAndMakeCapacityAtLeast(Node *&self, int capacity, WriteContext *tls, // Fix larger-than-desired capacities. Does not return nodes to freelists, // since that wouldn't actually reclaim the memory used for partial key // capacity. -void maybeDecreaseCapacity(Node *&self, WriteContext *tls, +void maybeDecreaseCapacity(Node *&self, WriteContext *writeContext, ConflictSet::Impl *impl) { const int maxCapacity = @@ -1669,7 +1672,7 @@ void maybeDecreaseCapacity(Node *&self, WriteContext *tls, if (self->getCapacity() <= maxCapacity) { return; } - freeAndMakeCapacityAtLeast(self, maxCapacity, tls, impl, false); + freeAndMakeCapacityAtLeast(self, maxCapacity, writeContext, impl, false); } #if defined(HAS_AVX) && !defined(__SANITIZE_THREAD__) @@ -1738,7 +1741,7 @@ void rezero(Node *n, InternalVersionT z) { } #endif -void mergeWithChild(TaggedNodePointer &self, WriteContext *tls, +void mergeWithChild(TaggedNodePointer &self, WriteContext *writeContext, ConflictSet::Impl *impl, Node *&dontInvalidate, Node3 *self3) { assert(!self3->entryPresent); @@ -1747,7 +1750,7 @@ void mergeWithChild(TaggedNodePointer &self, WriteContext *tls, if (minCapacity > child->getCapacity()) { const bool update = child == dontInvalidate; - freeAndMakeCapacityAtLeast(child, minCapacity, tls, impl, true); + freeAndMakeCapacityAtLeast(child, minCapacity, writeContext, impl, true); if (update) { dontInvalidate = child; } @@ -1774,10 +1777,10 @@ void mergeWithChild(TaggedNodePointer 
&self, WriteContext *tls, // Max versions are stored in the parent, so we need to update it now // that we have a new parent. Safe we call since the root never has a partial // key. - setMaxVersion(child, std::max(childMaxVersion, tls->zero)); + setMaxVersion(child, std::max(childMaxVersion, writeContext->zero)); self = child; - tls->release(self3); + writeContext->release(self3); } bool needsDownsize(Node *n) { @@ -1786,61 +1789,65 @@ bool needsDownsize(Node *n) { return n->numChildren + n->entryPresent < minTable[n->getType()]; } -void downsize(Node3 *self, WriteContext *tls, ConflictSet::Impl *impl, +void downsize(Node3 *self, WriteContext *writeContext, ConflictSet::Impl *impl, Node *&dontInvalidate) { if (self->numChildren == 0) { - auto *newSelf = tls->allocate(self->partialKeyLen); + auto *newSelf = writeContext->allocate(self->partialKeyLen); newSelf->copyChildrenAndKeyFrom(*self); getInTree(self, impl) = newSelf; - tls->release(self); + writeContext->release(self); } else { assert(self->numChildren == 1 && !self->entryPresent); - mergeWithChild(getInTree(self, impl), tls, impl, dontInvalidate, self); + mergeWithChild(getInTree(self, impl), writeContext, impl, dontInvalidate, + self); } } -void downsize(Node16 *self, WriteContext *tls, ConflictSet::Impl *impl) { +void downsize(Node16 *self, WriteContext *writeContext, + ConflictSet::Impl *impl) { assert(self->numChildren + int(self->entryPresent) < kMinChildrenNode16); - auto *newSelf = tls->allocate(self->partialKeyLen); + auto *newSelf = writeContext->allocate(self->partialKeyLen); newSelf->copyChildrenAndKeyFrom(*self); getInTree(self, impl) = newSelf; - tls->release(self); + writeContext->release(self); } -void downsize(Node48 *self, WriteContext *tls, ConflictSet::Impl *impl) { +void downsize(Node48 *self, WriteContext *writeContext, + ConflictSet::Impl *impl) { assert(self->numChildren + int(self->entryPresent) < kMinChildrenNode48); - auto *newSelf = tls->allocate(self->partialKeyLen); + auto 
*newSelf = writeContext->allocate(self->partialKeyLen); newSelf->copyChildrenAndKeyFrom(*self); getInTree(self, impl) = newSelf; - tls->release(self); + writeContext->release(self); } -void downsize(Node256 *self, WriteContext *tls, ConflictSet::Impl *impl) { +void downsize(Node256 *self, WriteContext *writeContext, + ConflictSet::Impl *impl) { assert(self->numChildren + int(self->entryPresent) < kMinChildrenNode256); auto *self256 = (Node256 *)self; - auto *newSelf = tls->allocate(self->partialKeyLen); + auto *newSelf = writeContext->allocate(self->partialKeyLen); newSelf->copyChildrenAndKeyFrom(*self256); getInTree(self, impl) = newSelf; - tls->release(self256); + writeContext->release(self256); } -void downsize(Node *self, WriteContext *tls, ConflictSet::Impl *impl, +void downsize(Node *self, WriteContext *writeContext, ConflictSet::Impl *impl, Node *&dontInvalidate) { switch (self->getType()) { case Type_Node0: // GCOVR_EXCL_LINE __builtin_unreachable(); // GCOVR_EXCL_LINE case Type_Node3: - downsize(static_cast(self), tls, impl, dontInvalidate); + downsize(static_cast(self), writeContext, impl, dontInvalidate); break; case Type_Node16: - downsize(static_cast(self), tls, impl); + downsize(static_cast(self), writeContext, impl); break; case Type_Node48: - downsize(static_cast(self), tls, impl); + downsize(static_cast(self), writeContext, impl); break; case Type_Node256: - downsize(static_cast(self), tls, impl); + downsize(static_cast(self), writeContext, impl); break; default: // GCOVR_EXCL_LINE __builtin_unreachable(); // GCOVR_EXCL_LINE @@ -1851,9 +1858,9 @@ void downsize(Node *self, WriteContext *tls, ConflictSet::Impl *impl, // path to self. May invalidate children of self->parent. Returns a pointer to // the node after self. 
Precondition: `self->entryPresent` -Node *erase(Node *self, WriteContext *tls, ConflictSet::Impl *impl, +Node *erase(Node *self, WriteContext *writeContext, ConflictSet::Impl *impl, bool logical) { - ++tls->accum.entries_erased; + ++writeContext->accum.entries_erased; assert(self->parent != nullptr); #if DEBUG_VERBOSE && !defined(NDEBUG) @@ -1871,13 +1878,13 @@ Node *erase(Node *self, WriteContext *tls, ConflictSet::Impl *impl, if (self->numChildren != 0) { if (needsDownsize(self)) { - downsize(self, tls, impl, result); + downsize(self, writeContext, impl, result); } return result; } assert(self->getType() == Type_Node0); - tls->release((Node0 *)self); + writeContext->release((Node0 *)self); switch (parent->getType()) { case Type_Node0: // GCOVR_EXCL_LINE @@ -1894,7 +1901,7 @@ Node *erase(Node *self, WriteContext *tls, ConflictSet::Impl *impl, } if (needsDownsize(parent3)) { - downsize(parent3, tls, impl, result); + downsize(parent3, writeContext, impl, result); } } break; case Type_Node16: { @@ -1909,7 +1916,7 @@ Node *erase(Node *self, WriteContext *tls, ConflictSet::Impl *impl, } if (needsDownsize(parent16)) { - downsize(parent16, tls, impl, result); + downsize(parent16, writeContext, impl, result); } } break; case Type_Node48: { @@ -1934,10 +1941,10 @@ Node *erase(Node *self, WriteContext *tls, ConflictSet::Impl *impl, parent48->index[parentIndex] = toRemoveChildrenIndex; parent48->reverseIndex[toRemoveChildrenIndex] = parentIndex; } - parent48->childMaxVersion[lastChildrenIndex] = tls->zero; + parent48->childMaxVersion[lastChildrenIndex] = writeContext->zero; if (needsDownsize(parent48)) { - downsize(parent48, tls, impl, result); + downsize(parent48, writeContext, impl, result); } } break; case Type_Node256: { @@ -1948,7 +1955,7 @@ Node *erase(Node *self, WriteContext *tls, ConflictSet::Impl *impl, --parent->numChildren; if (needsDownsize(parent256)) { - downsize(parent256, tls, impl, result); + downsize(parent256, writeContext, impl, result); } } break; 
default: // GCOVR_EXCL_LINE @@ -2154,16 +2161,16 @@ bool scan16(const InternalVersionT *vs, int begin, int end, // account for the range version of firstGt(searchpath(n) + [end - 1]) template bool checkMaxBetweenExclusiveImpl(Node0 *, int, int, InternalVersionT, - ReadContext *tls) { - ++tls->range_read_node_scan_accum; + ReadContext *readContext) { + ++readContext->range_read_node_scan_accum; return true; } template bool checkMaxBetweenExclusiveImpl(Node3 *n, int begin, int end, InternalVersionT readVersion, - ReadContext *tls) { - ++tls->range_read_node_scan_accum; + ReadContext *readContext) { + ++readContext->range_read_node_scan_accum; assume(-1 <= begin); assume(begin <= 256); assume(-1 <= end); @@ -2201,8 +2208,8 @@ bool checkMaxBetweenExclusiveImpl(Node3 *n, int begin, int end, template bool checkMaxBetweenExclusiveImpl(Node16 *n, int begin, int end, InternalVersionT readVersion, - ReadContext *tls) { - ++tls->range_read_node_scan_accum; + ReadContext *readContext) { + ++readContext->range_read_node_scan_accum; assume(-1 <= begin); assume(begin <= 256); assume(-1 <= end); @@ -2314,8 +2321,8 @@ bool checkMaxBetweenExclusiveImpl(Node16 *n, int begin, int end, template bool checkMaxBetweenExclusiveImpl(Node48 *n, int begin, int end, InternalVersionT readVersion, - ReadContext *tls) { - ++tls->range_read_node_scan_accum; + ReadContext *readContext) { + ++readContext->range_read_node_scan_accum; assume(-1 <= begin); assume(begin <= 256); assume(-1 <= end); @@ -2359,8 +2366,8 @@ bool checkMaxBetweenExclusiveImpl(Node48 *n, int begin, int end, template bool checkMaxBetweenExclusiveImpl(Node256 *n, int begin, int end, InternalVersionT readVersion, - ReadContext *tls) { - ++tls->range_read_node_scan_accum; + ReadContext *readContext) { + ++readContext->range_read_node_scan_accum; assume(-1 <= begin); assume(begin <= 256); assume(-1 <= end); @@ -2423,77 +2430,91 @@ bool checkMaxBetweenExclusiveImpl(Node256 *n, int begin, int end, } bool checkMaxBetweenExclusive(Node0 
*n, int begin, int end, - InternalVersionT readVersion, ReadContext *tls) { - return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, tls); + InternalVersionT readVersion, + ReadContext *readContext) { + return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, + readContext); } bool checkMaxBetweenExclusive(Node3 *n, int begin, int end, - InternalVersionT readVersion, ReadContext *tls) { - return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, tls); + InternalVersionT readVersion, + ReadContext *readContext) { + return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, + readContext); } #if defined(HAS_AVX) && !defined(__SANITIZE_THREAD__) __attribute__((target("avx512f"))) bool checkMaxBetweenExclusive(Node16 *n, int begin, int end, - InternalVersionT readVersion, ReadContext *tls) { - return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, tls); + InternalVersionT readVersion, + ReadContext *readContext) { + return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, + readContext); } __attribute__((target("default"))) #endif bool checkMaxBetweenExclusive(Node16 *n, int begin, int end, - InternalVersionT readVersion, ReadContext *tls) { - return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, tls); + InternalVersionT readVersion, ReadContext *readContext) { + return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, + readContext); } #if defined(HAS_AVX) && !defined(__SANITIZE_THREAD__) __attribute__((target("avx512f"))) bool checkMaxBetweenExclusive(Node48 *n, int begin, int end, - InternalVersionT readVersion, ReadContext *tls) { - return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, tls); + InternalVersionT readVersion, + ReadContext *readContext) { + return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, + readContext); } __attribute__((target("default"))) #endif bool checkMaxBetweenExclusive(Node48 *n, int begin, int end, - InternalVersionT readVersion, ReadContext *tls) { - return 
checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, tls); + InternalVersionT readVersion, ReadContext *readContext) { + return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, + readContext); } #if defined(HAS_AVX) && !defined(__SANITIZE_THREAD__) __attribute__((target("avx512f"))) bool checkMaxBetweenExclusive(Node256 *n, int begin, int end, - InternalVersionT readVersion, ReadContext *tls) { - return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, tls); + InternalVersionT readVersion, + ReadContext *readContext) { + return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, + readContext); } __attribute__((target("default"))) #endif bool checkMaxBetweenExclusive(Node256 *n, int begin, int end, - InternalVersionT readVersion, ReadContext *tls) { - return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, tls); + InternalVersionT readVersion, ReadContext *readContext) { + return checkMaxBetweenExclusiveImpl(n, begin, end, readVersion, + readContext); } #if defined(HAS_AVX) && !defined(__SANITIZE_THREAD__) __attribute__((target("avx512f"))) bool checkMaxBetweenExclusive(Node *n, int begin, int end, - InternalVersionT readVersion, ReadContext *tls) { + InternalVersionT readVersion, + ReadContext *readContext) { switch (n->getType()) { case Type_Node0: return checkMaxBetweenExclusiveImpl(static_cast(n), begin, - end, readVersion, tls); + end, readVersion, readContext); case Type_Node3: return checkMaxBetweenExclusiveImpl(static_cast(n), begin, - end, readVersion, tls); + end, readVersion, readContext); case Type_Node16: return checkMaxBetweenExclusiveImpl(static_cast(n), begin, - end, readVersion, tls); + end, readVersion, readContext); case Type_Node48: return checkMaxBetweenExclusiveImpl(static_cast(n), begin, - end, readVersion, tls); + end, readVersion, readContext); case Type_Node256: return checkMaxBetweenExclusiveImpl(static_cast(n), begin, - end, readVersion, tls); + end, readVersion, readContext); default: // GCOVR_EXCL_LINE 
__builtin_unreachable(); // GCOVR_EXCL_LINE } @@ -2502,23 +2523,23 @@ __attribute__((target("default"))) #endif bool checkMaxBetweenExclusive(Node *n, int begin, int end, - InternalVersionT readVersion, ReadContext *tls) { + InternalVersionT readVersion, ReadContext *readContext) { switch (n->getType()) { case Type_Node0: return checkMaxBetweenExclusiveImpl(static_cast(n), begin, - end, readVersion, tls); + end, readVersion, readContext); case Type_Node3: return checkMaxBetweenExclusiveImpl(static_cast(n), begin, - end, readVersion, tls); + end, readVersion, readContext); case Type_Node16: return checkMaxBetweenExclusiveImpl(static_cast(n), begin, - end, readVersion, tls); + end, readVersion, readContext); case Type_Node48: return checkMaxBetweenExclusiveImpl(static_cast(n), begin, - end, readVersion, tls); + end, readVersion, readContext); case Type_Node256: return checkMaxBetweenExclusiveImpl(static_cast(n), begin, - end, readVersion, tls); + end, readVersion, readContext); default: // GCOVR_EXCL_LINE __builtin_unreachable(); // GCOVR_EXCL_LINE } @@ -2549,14 +2570,15 @@ Vector getSearchPath(Arena &arena, Node *n) { template bool checkRangeStartsWith(NodeT *nTyped, std::span key, int begin, int end, InternalVersionT readVersion, - ReadContext *tls) { + ReadContext *readContext) { Node *n; #if DEBUG_VERBOSE && !defined(NDEBUG) fprintf(stderr, "%s(%02x,%02x)*\n", printable(key).c_str(), begin, end); #endif auto remaining = key; if (remaining.size() == 0) { - return checkMaxBetweenExclusive(nTyped, begin, end, readVersion, tls); + return checkMaxBetweenExclusive(nTyped, begin, end, readVersion, + readContext); } Node *child = getChild(nTyped, remaining[0]); @@ -2641,15 +2663,15 @@ checkMaxBetweenExclusiveImpl(Node256 *n, int begin, int end, [[nodiscard]] TaggedNodePointer *insert(TaggedNodePointer *self, std::span key, InternalVersionT writeVersion, - WriteContext *tls) { + WriteContext *writeContext) { - for (; key.size() != 0; ++tls->accum.insert_iterations) { - 
self = &getOrCreateChild(*self, key, writeVersion, tls); + for (; key.size() != 0; ++writeContext->accum.insert_iterations) { + self = &getOrCreateChild(*self, key, writeVersion, writeContext); } return self; } -void eraseTree(Node *root, WriteContext *tls) { +void eraseTree(Node *root, WriteContext *writeContext) { Arena arena; auto toFree = vector(arena); toFree.push_back(root); @@ -2657,43 +2679,43 @@ void eraseTree(Node *root, WriteContext *tls) { while (toFree.size() > 0) { auto *n = toFree.back(); toFree.pop_back(); - tls->accum.entries_erased += n->entryPresent; - ++tls->accum.nodes_released; + writeContext->accum.entries_erased += n->entryPresent; + ++writeContext->accum.nodes_released; removeKey(n); switch (n->getType()) { case Type_Node0: { auto *n0 = static_cast(n); - tls->release(n0); + writeContext->release(n0); } break; case Type_Node3: { auto *n3 = static_cast(n); for (int i = 0; i < n3->numChildren; ++i) { toFree.push_back(n3->children[i]); } - tls->release(n3); + writeContext->release(n3); } break; case Type_Node16: { auto *n16 = static_cast(n); for (int i = 0; i < n16->numChildren; ++i) { toFree.push_back(n16->children[i]); } - tls->release(n16); + writeContext->release(n16); } break; case Type_Node48: { auto *n48 = static_cast(n); for (int i = 0; i < n48->numChildren; ++i) { toFree.push_back(n48->children[i]); } - tls->release(n48); + writeContext->release(n48); } break; case Type_Node256: { auto *n256 = static_cast(n); auto *out = toFree.unsafePrepareAppend(n256->numChildren).data(); n256->bitSet.forEachSet([&](int i) { *out++ = n256->children[i]; }); assert(out == toFree.end()); - tls->release(n256); + writeContext->release(n256); } break; default: // GCOVR_EXCL_LINE __builtin_unreachable(); // GCOVR_EXCL_LINE @@ -2702,11 +2724,11 @@ void eraseTree(Node *root, WriteContext *tls) { } void addPointWrite(TaggedNodePointer &root, std::span key, - InternalVersionT writeVersion, WriteContext *tls) { - ++tls->accum.point_writes; - auto n = 
*insert(&root, key, writeVersion, tls); + InternalVersionT writeVersion, WriteContext *writeContext) { + ++writeContext->accum.point_writes; + auto n = *insert(&root, key, writeVersion, writeContext); if (!n->entryPresent) { - ++tls->accum.entries_inserted; + ++writeContext->accum.entries_inserted; auto *p = nextLogical(n); addKey(n); @@ -2714,7 +2736,8 @@ void addPointWrite(TaggedNodePointer &root, std::span key, n->entry.pointVersion = writeVersion; n->entry.rangeVersion = - p == nullptr ? tls->zero : std::max(p->entry.rangeVersion, tls->zero); + p == nullptr ? writeContext->zero + : std::max(p->entry.rangeVersion, writeContext->zero); } else { assert(writeVersion >= n->entry.pointVersion); n->entry.pointVersion = writeVersion; @@ -2788,22 +2811,24 @@ horizontalMax16(InternalVersionT *vs, InternalVersionT) { } // Precondition: `node->entryPresent`, and node is not the root -void fixupMaxVersion(Node *node, WriteContext *tls) { +void fixupMaxVersion(Node *node, WriteContext *writeContext) { assert(node->parent); InternalVersionT max; assert(node->entryPresent); - max = std::max(node->entry.pointVersion, tls->zero); + max = std::max(node->entry.pointVersion, writeContext->zero); switch (node->getType()) { case Type_Node0: break; case Type_Node3: { auto *self3 = static_cast(node); - max = std::max(max, horizontalMaxUpTo16(self3->childMaxVersion, tls->zero, - self3->numChildren)); + max = std::max(max, + horizontalMaxUpTo16(self3->childMaxVersion, + writeContext->zero, self3->numChildren)); } break; case Type_Node16: { auto *self16 = static_cast(node); - max = std::max(max, horizontalMaxUpTo16(self16->childMaxVersion, tls->zero, + max = std::max(max, horizontalMaxUpTo16(self16->childMaxVersion, + writeContext->zero, self16->numChildren)); } break; case Type_Node48: { @@ -2814,7 +2839,8 @@ void fixupMaxVersion(Node *node, WriteContext *tls) { } break; case Type_Node256: { auto *self256 = static_cast(node); - max = std::max(max, 
horizontalMax16(self256->childMaxVersion, tls->zero)); + max = std::max( + max, horizontalMax16(self256->childMaxVersion, writeContext->zero)); } break; default: // GCOVR_EXCL_LINE __builtin_unreachable(); // GCOVR_EXCL_LINE @@ -2824,40 +2850,43 @@ void fixupMaxVersion(Node *node, WriteContext *tls) { void addWriteRange(TaggedNodePointer &root, std::span begin, std::span end, InternalVersionT writeVersion, - WriteContext *tls, ConflictSet::Impl *impl) { + WriteContext *writeContext, ConflictSet::Impl *impl) { int lcp = longestCommonPrefix(begin.data(), end.data(), std::min(begin.size(), end.size())); if (lcp == int(begin.size()) && end.size() == begin.size() + 1 && end.back() == 0) { - return addPointWrite(root, begin, writeVersion, tls); + return addPointWrite(root, begin, writeVersion, writeContext); } - ++tls->accum.range_writes; + ++writeContext->accum.range_writes; const bool beginIsPrefix = lcp == int(begin.size()); - auto useAsRoot = insert(&root, begin.subspan(0, lcp), writeVersion, tls); + auto useAsRoot = + insert(&root, begin.subspan(0, lcp), writeVersion, writeContext); begin = begin.subspan(lcp, begin.size() - lcp); end = end.subspan(lcp, end.size() - lcp); - Node *beginNode = *insert(useAsRoot, begin, writeVersion, tls); + Node *beginNode = *insert(useAsRoot, begin, writeVersion, writeContext); addKey(beginNode); if (!beginNode->entryPresent) { - ++tls->accum.entries_inserted; + ++writeContext->accum.entries_inserted; auto *p = nextLogical(beginNode); beginNode->entry.rangeVersion = - p == nullptr ? tls->zero : std::max(p->entry.rangeVersion, tls->zero); + p == nullptr ? 
writeContext->zero + : std::max(p->entry.rangeVersion, writeContext->zero); beginNode->entryPresent = true; } beginNode->entry.pointVersion = writeVersion; - Node *endNode = *insert(useAsRoot, end, writeVersion, tls); + Node *endNode = *insert(useAsRoot, end, writeVersion, writeContext); addKey(endNode); if (!endNode->entryPresent) { - ++tls->accum.entries_inserted; + ++writeContext->accum.entries_inserted; auto *p = nextLogical(endNode); endNode->entry.pointVersion = - p == nullptr ? tls->zero : std::max(p->entry.rangeVersion, tls->zero); + p == nullptr ? writeContext->zero + : std::max(p->entry.rangeVersion, writeContext->zero); if (beginIsPrefix) { // beginNode may have been invalidated when inserting end beginNode = *useAsRoot; @@ -2873,7 +2902,7 @@ void addWriteRange(TaggedNodePointer &root, std::span begin, endNode->endOfRange = true; Node *iter = beginNode; for (iter = nextLogical(iter); !iter->endOfRange; - iter = erase(iter, tls, impl, /*logical*/ true)) { + iter = erase(iter, writeContext, impl, /*logical*/ true)) { assert(!iter->endOfRange); } assert(iter->endOfRange); @@ -2881,7 +2910,7 @@ void addWriteRange(TaggedNodePointer &root, std::span begin, // Inserting end trashed the last node's maxVersion. Fix that. Safe to call // since the end key always has non-zero size. 
- fixupMaxVersion(iter, tls); + fixupMaxVersion(iter, writeContext); } Node *firstGeqPhysical(Node *n, const std::span key) { @@ -2988,7 +3017,7 @@ struct CheckContext { const ConflictSet::ReadRange *queries; ConflictSet::Result *results; int64_t started; - ReadContext tls; + ReadContext readContext; }; PRESERVE_NONE void keepGoing(CheckJob *job, CheckContext *context) { @@ -3032,7 +3061,7 @@ static Continuation iterTable[] = {iter, iter, iter, iter, iter}; void begin(CheckJob *job, CheckContext *context) { - ++context->tls.point_read_accum; + ++context->readContext.point_read_accum; #if DEBUG_VERBOSE && !defined(NDEBUG) fprintf(stderr, "Check point read: %s\n", printable(key).c_str()); #endif @@ -3102,11 +3131,11 @@ template void iter(CheckJob *job, CheckContext *context) { } } - ++context->tls.point_read_iterations_accum; + ++context->readContext.point_read_iterations_accum; if (job->maxV <= job->readVersion) { job->setResult(true); - ++context->tls.point_read_short_circuit_accum; + ++context->readContext.point_read_short_circuit_accum; MUSTTAIL return complete(job, context); } @@ -3160,7 +3189,7 @@ static Continuation iterTable[] = {iter, iter, iter, iter, iter}; void begin(CheckJob *job, CheckContext *context) { - ++context->tls.prefix_read_accum; + ++context->readContext.prefix_read_accum; #if DEBUG_VERBOSE && !defined(NDEBUG) fprintf(stderr, "Check prefix read: %s\n", printable(key).c_str()); #endif @@ -3231,11 +3260,11 @@ template void iter(CheckJob *job, CheckContext *context) { } } - ++context->tls.prefix_read_iterations_accum; + ++context->readContext.prefix_read_iterations_accum; if (job->maxV <= job->readVersion) { job->setResult(true); - ++context->tls.prefix_read_short_circuit_accum; + ++context->readContext.prefix_read_short_circuit_accum; MUSTTAIL return complete(job, context); } @@ -3327,7 +3356,7 @@ PRESERVE_NONE void begin(CheckJob *job, CheckContext *context) { MUSTTAIL return job->continuation(job, context); } - ++context->tls.range_read_accum; 
+ ++context->readContext.range_read_accum; job->remaining = job->begin.subspan(0, job->lcp); if (job->remaining.size() == 0) { @@ -3369,11 +3398,11 @@ void common_prefix_iter(CheckJob *job, CheckContext *context) { job->remaining.size() - (1 + child->partialKeyLen)); - ++context->tls.range_read_iterations_accum; + ++context->readContext.range_read_iterations_accum; if (job->maxV <= job->readVersion) { job->setResult(true); - ++context->tls.range_read_short_circuit_accum; + ++context->readContext.range_read_short_circuit_accum; MUSTTAIL return complete(job, context); } @@ -3425,7 +3454,7 @@ PRESERVE_NONE void done_common_prefix_iter(CheckJob *job, } if (!checkMaxBetweenExclusive(n, -1, job->remaining[0], job->readVersion, - &context->tls)) { + &context->readContext)) { job->setResult(false); MUSTTAIL return complete(job, context); } @@ -3464,7 +3493,7 @@ PRESERVE_NONE void done_common_prefix_iter(CheckJob *job, if (!checkRangeStartsWith(n, job->begin.subspan(0, job->lcp), job->begin[job->lcp], job->end[job->lcp], - job->readVersion, &context->tls)) { + job->readVersion, &context->readContext)) { job->setResult(false); MUSTTAIL return complete(job, context); } @@ -3559,7 +3588,7 @@ PRESERVE_NONE void left_side_iter(CheckJob *job, CheckContext *context) { } } - ++context->tls.range_read_iterations_accum; + ++context->readContext.range_read_iterations_accum; if (job->maxV <= job->readVersion) { job->continuation = done_left_side_iter; @@ -3573,7 +3602,7 @@ PRESERVE_NONE void left_side_iter(CheckJob *job, CheckContext *context) { } if (!checkMaxBetweenExclusive(n, job->remaining[0], 256, job->readVersion, - &context->tls)) { + &context->readContext)) { job->setResult(false); MUSTTAIL return complete(job, context); } @@ -3697,7 +3726,7 @@ PRESERVE_NONE void right_side_iter(CheckJob *job, CheckContext *context) { } } - ++context->tls.range_read_iterations_accum; + ++context->readContext.range_read_iterations_accum; if (job->remaining.size() == 0) { job->continuation = 
down_left_spine; @@ -3711,7 +3740,7 @@ PRESERVE_NONE void right_side_iter(CheckJob *job, CheckContext *context) { } if (!checkMaxBetweenExclusive(n, -1, job->remaining[0], job->readVersion, - &context->tls)) { + &context->readContext)) { job->setResult(false); MUSTTAIL return complete(job, context); } @@ -3773,13 +3802,13 @@ namespace { // point or range version according to cmp, but this version short circuits as // soon as it can prove that there's no conflict. bool checkPointRead(Node *n, const std::span key, - InternalVersionT readVersion, ReadContext *tls) { - ++tls->point_read_accum; + InternalVersionT readVersion, ReadContext *readContext) { + ++readContext->point_read_accum; #if DEBUG_VERBOSE && !defined(NDEBUG) fprintf(stderr, "Check point read: %s\n", printable(key).c_str()); #endif auto remaining = key; - for (;; ++tls->point_read_iterations_accum) { + for (;; ++readContext->point_read_iterations_accum) { if (remaining.size() == 0) { if (n->entryPresent) { return n->entry.pointVersion <= readVersion; @@ -3833,7 +3862,7 @@ bool checkPointRead(Node *n, const std::span key, } if (maxV <= readVersion) { - ++tls->point_read_short_circuit_accum; + ++readContext->point_read_short_circuit_accum; return true; } } @@ -3847,13 +3876,13 @@ downLeftSpine: // max version or range version if this prefix doesn't exist, but this version // short circuits as soon as it can prove that there's no conflict. 
bool checkPrefixRead(Node *n, const std::span key, - InternalVersionT readVersion, ReadContext *tls) { - ++tls->prefix_read_accum; + InternalVersionT readVersion, ReadContext *readContext) { + ++readContext->prefix_read_accum; #if DEBUG_VERBOSE && !defined(NDEBUG) fprintf(stderr, "Check prefix read: %s\n", printable(key).c_str()); #endif auto remaining = key; - for (;; ++tls->prefix_read_iterations_accum) { + for (;; ++readContext->prefix_read_iterations_accum) { if (remaining.size() == 0) { // There's no way to encode a prefix read of "", so n is not the root return maxVersion(n) <= readVersion; @@ -3908,7 +3937,7 @@ bool checkPrefixRead(Node *n, const std::span key, } if (maxV <= readVersion) { - ++tls->prefix_read_short_circuit_accum; + ++readContext->prefix_read_short_circuit_accum; return true; } } @@ -3921,17 +3950,19 @@ downLeftSpine: // Return true if the max version among all keys that start with key[:prefixLen] // that are >= key is <= readVersion bool checkRangeLeftSide(Node *n, std::span key, int prefixLen, - InternalVersionT readVersion, ReadContext *tls) { + InternalVersionT readVersion, + ReadContext *readContext) { auto remaining = key; int searchPathLen = 0; - for (;; ++tls->range_read_iterations_accum) { + for (;; ++readContext->range_read_iterations_accum) { if (remaining.size() == 0) { assert(searchPathLen >= prefixLen); return maxVersion(n) <= readVersion; } if (searchPathLen >= prefixLen) { - if (!checkMaxBetweenExclusive(n, remaining[0], 256, readVersion, tls)) { + if (!checkMaxBetweenExclusive(n, remaining[0], 256, readVersion, + readContext)) { return false; } } @@ -4006,11 +4037,12 @@ downLeftSpine: // Return true if the max version among all keys that start with key[:prefixLen] // that are < key is <= readVersion bool checkRangeRightSide(Node *n, std::span key, int prefixLen, - InternalVersionT readVersion, ReadContext *tls) { + InternalVersionT readVersion, + ReadContext *readContext) { auto remaining = key; int searchPathLen = 0; - for 
(;; ++tls->range_read_iterations_accum) { + for (;; ++readContext->range_read_iterations_accum) { assert(searchPathLen <= int(key.size())); if (remaining.size() == 0) { goto downLeftSpine; @@ -4021,7 +4053,8 @@ bool checkRangeRightSide(Node *n, std::span key, int prefixLen, return false; } - if (!checkMaxBetweenExclusive(n, -1, remaining[0], readVersion, tls)) { + if (!checkMaxBetweenExclusive(n, -1, remaining[0], readVersion, + readContext)) { return false; } } @@ -4098,25 +4131,25 @@ downLeftSpine: } bool checkRangeRead(Node *n, std::span begin, std::span end, InternalVersionT readVersion, - ReadContext *tls) { + ReadContext *readContext) { int lcp = longestCommonPrefix(begin.data(), end.data(), std::min(begin.size(), end.size())); if (lcp == int(begin.size()) && end.size() == begin.size() + 1 && end.back() == 0) { - return checkPointRead(n, begin, readVersion, tls); + return checkPointRead(n, begin, readVersion, readContext); } if (lcp == int(begin.size() - 1) && end.size() == begin.size() && int(begin.back()) + 1 == int(end.back())) { - return checkPrefixRead(n, begin, readVersion, tls); + return checkPrefixRead(n, begin, readVersion, readContext); } - ++tls->range_read_accum; + ++readContext->range_read_accum; auto remaining = begin.subspan(0, lcp); Arena arena; // Advance down common prefix, but stay on a physical path in the tree - for (;; ++tls->range_read_iterations_accum) { + for (;; ++readContext->range_read_iterations_accum) { assert(getSearchPath(arena, n) <=> begin.subspan(0, lcp - remaining.size()) == 0); @@ -4138,7 +4171,7 @@ bool checkRangeRead(Node *n, std::span begin, } } if (v <= readVersion) { - ++tls->range_read_short_circuit_accum; + ++readContext->range_read_short_circuit_accum; return true; } n = child; @@ -4157,7 +4190,7 @@ bool checkRangeRead(Node *n, std::span begin, lcp -= consumed; if (lcp == int(begin.size())) { - return checkRangeRightSide(n, end, lcp, readVersion, tls); + return checkRangeRightSide(n, end, lcp, readVersion, 
readContext); } // This makes it safe to check maxVersion within checkRangeLeftSide. If this @@ -4165,9 +4198,9 @@ bool checkRangeRead(Node *n, std::span begin, assert(!(n->parent == nullptr && begin.size() == 0)); return checkRangeStartsWith(n, begin.subspan(0, lcp), begin[lcp], end[lcp], - readVersion, tls) && - checkRangeLeftSide(n, begin, lcp + 1, readVersion, tls) && - checkRangeRightSide(n, end, lcp + 1, readVersion, tls); + readVersion, readContext) && + checkRangeLeftSide(n, begin, lcp + 1, readVersion, readContext) && + checkRangeRightSide(n, end, lcp + 1, readVersion, readContext); } } // namespace @@ -4186,13 +4219,13 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl { ok = checkPointRead( root, std::span(reads[i].begin.p, reads[i].begin.len), - InternalVersionT(reads[i].readVersion), &context.tls); + InternalVersionT(reads[i].readVersion), &context.readContext); } else { ok = checkRangeRead( root, std::span(reads[i].begin.p, reads[i].begin.len), std::span(reads[i].end.p, reads[i].end.len), - InternalVersionT(reads[i].readVersion), &context.tls); + InternalVersionT(reads[i].readVersion), &context.readContext); } result[i] = ok ? 
Commit : Conflict; } @@ -4209,7 +4242,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl { int64_t check_byte_accum = 0; CheckContext context; - context.tls.impl = this; + context.readContext.impl = this; #if __has_attribute(preserve_none) && __has_attribute(musttail) if (count == 1) { @@ -4246,7 +4279,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl { Arena arena; auto *results2 = new (arena) Result[count]; CheckContext context2; - context.tls.impl = this; + context2.readContext.impl = this; useSequential(reads, results2, count, context2); assert(memcmp(result, results2, count) == 0); #endif @@ -4260,27 +4293,31 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl { assert(reads[i].readVersion <= newestVersionFullPrecision); const auto &r = reads[i]; check_byte_accum += r.begin.len + r.end.len; - context.tls.commits_accum += result[i] == Commit; - context.tls.conflicts_accum += result[i] == Conflict; - context.tls.too_olds_accum += result[i] == TooOld; + context.readContext.commits_accum += result[i] == Commit; + context.readContext.conflicts_accum += result[i] == Conflict; + context.readContext.too_olds_accum += result[i] == TooOld; } - point_read_total.add(context.tls.point_read_accum); - prefix_read_total.add(context.tls.prefix_read_accum); - range_read_total.add(context.tls.range_read_accum); - range_read_node_scan_total.add(context.tls.range_read_node_scan_accum); + point_read_total.add(context.readContext.point_read_accum); + prefix_read_total.add(context.readContext.prefix_read_accum); + range_read_total.add(context.readContext.range_read_accum); + range_read_node_scan_total.add( + context.readContext.range_read_node_scan_accum); point_read_short_circuit_total.add( - context.tls.point_read_short_circuit_accum); + context.readContext.point_read_short_circuit_accum); prefix_read_short_circuit_total.add( - context.tls.prefix_read_short_circuit_accum); + context.readContext.prefix_read_short_circuit_accum); 
range_read_short_circuit_total.add( - context.tls.range_read_short_circuit_accum); - point_read_iterations_total.add(context.tls.point_read_iterations_accum); - prefix_read_iterations_total.add(context.tls.prefix_read_iterations_accum); - range_read_iterations_total.add(context.tls.range_read_iterations_accum); - commits_total.add(context.tls.commits_accum); - conflicts_total.add(context.tls.conflicts_accum); - too_olds_total.add(context.tls.too_olds_accum); + context.readContext.range_read_short_circuit_accum); + point_read_iterations_total.add( + context.readContext.point_read_iterations_accum); + prefix_read_iterations_total.add( + context.readContext.prefix_read_iterations_accum); + range_read_iterations_total.add( + context.readContext.range_read_iterations_accum); + commits_total.add(context.readContext.commits_accum); + conflicts_total.add(context.readContext.conflicts_accum); + too_olds_total.add(context.readContext.too_olds_accum); check_bytes_total.add(check_byte_accum); } @@ -4289,17 +4326,17 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl { // There could be other conflict sets in the same thread. We need // InternalVersionT::zero to be correct for this conflict set for the // lifetime of the current call frame. 
- InternalVersionT::zero = tls.zero = oldestVersion; + InternalVersionT::zero = writeContext.zero = oldestVersion; #endif assert(writeVersion >= newestVersionFullPrecision); - assert(tls.accum.entries_erased == 0); - assert(tls.accum.entries_inserted == 0); + assert(writeContext.accum.entries_erased == 0); + assert(writeContext.accum.entries_inserted == 0); if (oldestExtantVersion < writeVersion - kMaxCorrectVersionWindow) [[unlikely]] { if (writeVersion > newestVersionFullPrecision + kNominalVersionWindow) { - eraseTree(root, &tls); + eraseTree(root, &writeContext); init(writeVersion - kNominalVersionWindow); } @@ -4324,32 +4361,33 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl { for (int i = 0; i < count; ++i) { const auto &w = writes[i]; - tls.accum.write_bytes += w.begin.len + w.end.len; + writeContext.accum.write_bytes += w.begin.len + w.end.len; auto begin = std::span(w.begin.p, w.begin.len); auto end = std::span(w.end.p, w.end.len); if (w.end.len > 0) { - addWriteRange(root, begin, end, InternalVersionT(writeVersion), &tls, - this); + addWriteRange(root, begin, end, InternalVersionT(writeVersion), + &writeContext, this); } else { - addPointWrite(root, begin, InternalVersionT(writeVersion), &tls); + addPointWrite(root, begin, InternalVersionT(writeVersion), + &writeContext); } } // Run gc at least 200% the rate we're inserting entries - keyUpdates += - std::max(tls.accum.entries_inserted - tls.accum.entries_erased, - 0) * - 2; + keyUpdates += std::max(writeContext.accum.entries_inserted - + writeContext.accum.entries_erased, + 0) * + 2; - point_writes_total.add(tls.accum.point_writes); - range_writes_total.add(tls.accum.range_writes); - nodes_allocated_total.add(tls.accum.nodes_allocated); - nodes_released_total.add(tls.accum.nodes_released); - entries_inserted_total.add(tls.accum.entries_inserted); - entries_erased_total.add(tls.accum.entries_erased); - insert_iterations_total.add(tls.accum.insert_iterations); - 
write_bytes_total.add(tls.accum.write_bytes); - memset(&tls.accum, 0, sizeof(tls.accum)); + point_writes_total.add(writeContext.accum.point_writes); + range_writes_total.add(writeContext.accum.range_writes); + nodes_allocated_total.add(writeContext.accum.nodes_allocated); + nodes_released_total.add(writeContext.accum.nodes_released); + entries_inserted_total.add(writeContext.accum.entries_inserted); + entries_erased_total.add(writeContext.accum.entries_erased); + insert_iterations_total.add(writeContext.accum.insert_iterations); + write_bytes_total.add(writeContext.accum.write_bytes); + memset(&writeContext.accum, 0, sizeof(writeContext.accum)); } // Spends up to `fuel` gc'ing, and returns its unused fuel. Reclaims memory @@ -4378,9 +4416,9 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl { // There's no way to insert a range such that range version of the // right node is greater than the point version of the left node assert(n->entry.rangeVersion <= oldestVersion); - n = erase(n, &tls, this, /*logical*/ false); + n = erase(n, &writeContext, this, /*logical*/ false); } else { - maybeDecreaseCapacity(n, &tls, this); + maybeDecreaseCapacity(n, &writeContext, this); n = nextPhysical(n); } } @@ -4417,7 +4455,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl { this->oldestVersionFullPrecision = newOldestVersion; this->oldestVersion = oldestVersion; #if !USE_64_BIT - InternalVersionT::zero = tls.zero = oldestVersion; + InternalVersionT::zero = writeContext.zero = oldestVersion; #endif #ifdef NDEBUG // This is here for performance reasons, since we want to amortize the @@ -4429,10 +4467,14 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl { #endif keyUpdates = gcScanStep(keyUpdates); - nodes_allocated_total.add(std::exchange(tls.accum.nodes_allocated, 0)); - nodes_released_total.add(std::exchange(tls.accum.nodes_released, 0)); - entries_inserted_total.add(std::exchange(tls.accum.entries_inserted, 0)); - 
entries_erased_total.add(std::exchange(tls.accum.entries_erased, 0)); + nodes_allocated_total.add( + std::exchange(writeContext.accum.nodes_allocated, 0)); + nodes_released_total.add( + std::exchange(writeContext.accum.nodes_released, 0)); + entries_inserted_total.add( + std::exchange(writeContext.accum.entries_inserted, 0)); + entries_erased_total.add( + std::exchange(writeContext.accum.entries_erased, 0)); oldest_version.set(oldestVersionFullPrecision); } @@ -4446,15 +4488,15 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl { newest_version.set(newestVersionFullPrecision); oldest_extant_version.set(oldestExtantVersion); - tls.~WriteContext(); - new (&tls) WriteContext(); + writeContext.~WriteContext(); + new (&writeContext) WriteContext(); removalKeyArena = Arena{}; removalKey = {}; keyUpdates = 10; // Insert "" - root = tls.allocate(0); + root = writeContext.allocate(0); root->numChildren = 0; root->parent = nullptr; root->entryPresent = false; @@ -4467,7 +4509,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl { root->entry.rangeVersion = this->oldestVersion; #if !USE_64_BIT - InternalVersionT::zero = tls.zero = this->oldestVersion; + InternalVersionT::zero = writeContext.zero = this->oldestVersion; #endif // Intentionally not resetting totalBytes @@ -4479,11 +4521,11 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl { metrics = initMetrics(metricsList, metricsCount); } ~Impl() { - eraseTree(root, &tls); + eraseTree(root, &writeContext); safe_free(metrics, metricsCount * sizeof(metrics[0])); } - WriteContext tls; + WriteContext writeContext; Arena removalKeyArena; std::span removalKey;