3 Commits

351ff3df3b  Cave in and just add the unreachable's gcc wants  (2024-03-13 14:58:34 -07:00)
  All checks were successful:
  Tests / Clang: total 932, passed 932 (Total 0, New 0, Outstanding 0, Fixed 0, Trend :clap:)
  Tests / Release [gcc]: total 932, passed 932
  Tests / Release [gcc,aarch64]: total 931, passed 931
  Tests / Coverage: total 930, passed 930
  weaselab/conflict-set/pipeline/head: This commit looks good

e818648cdc  makeCapacityAtLeast -> freeAndMakeCapacityAtLeast  (2024-03-13 14:01:47 -07:00)

12540b8713  maybeDecreaseCapacity policy was too strong  (2024-03-13 14:00:01 -07:00)
  We can allow larger capacities and still pay for the key bytes


@@ -292,9 +292,10 @@ static_assert(kNode3Surplus >= kMinNodeSurplus);
 static_assert(kBytesPerKey - sizeof(Node0) >= kMinNodeSurplus);
 
 // setOldestVersion will additionally try to maintain this property:
-// `max(children, 1) * length >= capacity`
+// `(children + entryPresent) * length >= capacity`
 //
-// Which should give us the budget to pay for the key bytes
+// Which should give us the budget to pay for the key bytes. (children +
+// entryPresent) is a lower bound on how many keys these bytes are a prefix of
 
 template <class T, int64_t kMemoryBound = (1 << 20)>
 struct BoundedFreeListAllocator {
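The updated comment's reasoning: each child subtree holds at least one key, and an entry on the node itself is one more, so (children + entryPresent) lower-bounds the number of stored keys that include this node's partial key as a prefix. A minimal standalone sketch of the property as a predicate, using a hypothetical FakeNode stand-in rather than the real Node type:

#include <cassert>

// Hypothetical stand-in for the fields the property talks about.
struct FakeNode {
  int numChildren;        // child slots in use; each subtree holds >= 1 key
  bool entryPresent;      // the node itself stores an entry (one more key)
  int partialKeyLen;      // partial-key bytes actually used
  int partialKeyCapacity; // partial-key bytes allocated
};

// `(children + entryPresent) * length >= capacity`: the keys sharing this
// partial key as a prefix collectively pay for the allocated bytes.
bool paysForKeyBytes(const FakeNode &n) {
  return (n.numChildren + int(n.entryPresent)) * n.partialKeyLen >=
         n.partialKeyCapacity;
}

int main() {
  assert(paysForKeyBytes({/*children*/ 2, /*entry*/ true, /*len*/ 5, /*cap*/ 15}));
  assert(!paysForKeyBytes({/*children*/ 1, /*entry*/ false, /*len*/ 5, /*cap*/ 8}));
}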
@@ -473,6 +474,7 @@ Node *&getChildExists(Node *self, uint8_t index) {
     return self256->children[index].child;
   }
   }
+  __builtin_unreachable(); // GCOVR_EXCL_LINE
 }
 
 // Precondition - an entry for index must exist in the node
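For context on the first commit ("Cave in and just add the unreachable's gcc wants"): even when a switch over the node type returns in every case, GCC can still warn that control reaches the end of a non-void function (-Wreturn-type), so the diff marks the fall-through point with __builtin_unreachable() and tags it GCOVR_EXCL_LINE so gcovr does not count the never-executed line against coverage. A minimal sketch of the pattern, with a made-up enum rather than the real node types:

#include <cstdint>

enum Type : uint8_t { Type_A, Type_B };

// The switch is exhaustive, but GCC cannot prove the enum value is always
// in range, so the tail is marked unreachable and excluded from coverage.
int describe(Type t) {
  switch (t) {
  case Type_A:
    return 1;
  case Type_B:
    return 2;
  }
  __builtin_unreachable(); // GCOVR_EXCL_LINE
}

int main() { return describe(Type_A) == 1 ? 0 : 1; }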
@@ -504,6 +506,7 @@ Node *getChild(Node *self, uint8_t index) {
     return self256->children[index].child;
   }
   }
+  __builtin_unreachable(); // GCOVR_EXCL_LINE
 }
 
 template <class NodeT> int getChildGeqSimd(NodeT *self, int child) {
@@ -583,6 +586,7 @@ int getChildGeq(Node *self, int child) {
     return self48->bitSet.firstSetGeq(child);
   }
   }
+  __builtin_unreachable(); // GCOVR_EXCL_LINE
 }
 
 void setChildrenParents(Node3 *n) {
@@ -775,6 +779,7 @@ Node *&getOrCreateChild(Node *&self, uint8_t index,
     return self256->children[index].child;
   }
   }
+  __builtin_unreachable(); // GCOVR_EXCL_LINE
 }
 
 Node *nextPhysical(Node *node) {
@@ -802,8 +807,9 @@ Node *nextLogical(Node *node) {
 // Invalidates `self`, replacing it with a node of at least capacity.
 // Does not return nodes to freelists when kUseFreeList is false.
 template <bool kUseFreeList>
-void makeCapacityAtLeast(Node *&self, int capacity, NodeAllocators *allocators,
-                         ConflictSet::Impl *impl) {
+void freeAndMakeCapacityAtLeast(Node *&self, int capacity,
+                                NodeAllocators *allocators,
+                                ConflictSet::Impl *impl) {
   switch (self->type) {
   case Type_Node0: {
     auto *self0 = (Node0 *)self;
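The rename matches the documented behaviour: the helper gives up the current allocation (onto a freelist when kUseFreeList is true) and repoints `self` at a replacement with at least the requested partial-key capacity, so "free" now appears in the name. A very rough sketch of that shape under stated assumptions, using a hypothetical Buf type rather than the real Node/NodeAllocators machinery; what the real copy step preserves is a guess:

#include <cstdlib>
#include <cstring>
#include <vector>

// Hypothetical stand-in; not the real Node type.
struct Buf {
  int capacity;
  // payload would follow in a real flexible-array-style node
};

static std::vector<Buf *> gFreeList;

// Sketch of the renamed helper's shape: the old allocation is always given
// up (recycled or freed), and `self` is replaced by a fresh allocation of
// at least `capacity`.
template <bool kUseFreeList>
void freeAndMakeCapacityAtLeast(Buf *&self, int capacity) {
  Buf *old = self;
  self = static_cast<Buf *>(std::malloc(sizeof(Buf) + capacity));
  std::memcpy(self, old, sizeof(Buf)); // keep the header fields (a guess at
                                       // what the real code carries over)
  self->capacity = capacity;
  if (kUseFreeList) {
    gFreeList.push_back(old); // recycle for later reuse
  } else {
    std::free(old);
  }
}

int main() {
  Buf *b = static_cast<Buf *>(std::malloc(sizeof(Buf)));
  b->capacity = 0;
  freeAndMakeCapacityAtLeast</*kUseFreeList*/ false>(b, 16);
  bool ok = b->capacity >= 16;
  std::free(b);
  return ok ? 0 : 1;
}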
@@ -910,12 +916,12 @@ void makeCapacityAtLeast(Node *&self, int capacity, NodeAllocators *allocators,
 void maybeDecreaseCapacity(Node *&self, NodeAllocators *allocators,
                            ConflictSet::Impl *impl) {
   const int maxCapacity =
-      std::max<int>(self->numChildren, 1) * self->partialKeyLen;
+      (self->numChildren + int(self->entryPresent)) * self->partialKeyLen;
   if (self->partialKeyCapacity <= maxCapacity) {
     return;
   }
-  makeCapacityAtLeast</*kUseFreeList*/ false>(self, maxCapacity, allocators,
-                                              impl);
+  freeAndMakeCapacityAtLeast</*kUseFreeList*/ false>(self, maxCapacity,
+                                                     allocators, impl);
 }
 
 // TODO fuse into erase child so we don't need to repeat branches on type
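This hunk is what commit 12540b8713 ("maybeDecreaseCapacity policy was too strong") refers to: the old bound std::max(children, 1) * length could force a shrink even when the keys passing through the node still paid for the capacity, while the new bound also credits the node's own entry. A small worked comparison with made-up numbers:

#include <algorithm>
#include <cstdio>

int main() {
  // Hypothetical node: 3 children, an entry of its own, a 10-byte partial
  // key, and 36 bytes of partial-key capacity currently allocated.
  int numChildren = 3;
  bool entryPresent = true;
  int partialKeyLen = 10;
  int partialKeyCapacity = 36;

  int oldBound = std::max(numChildren, 1) * partialKeyLen;          // 30
  int newBound = (numChildren + int(entryPresent)) * partialKeyLen; // 40

  // maybeDecreaseCapacity shrinks only when capacity exceeds the bound:
  // the old policy would shrink this node (36 > 30); the new one keeps it,
  // since at least 4 keys have these bytes as a prefix and pay for 40.
  std::printf("old policy: %s\n", partialKeyCapacity > oldBound ? "shrink" : "keep");
  std::printf("new policy: %s\n", partialKeyCapacity > newBound ? "shrink" : "keep");
}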
@@ -946,8 +952,8 @@ void maybeDownsize(Node *self, NodeAllocators *allocators,
   if (minCapacity > child->partialKeyCapacity) {
     const bool update = child == dontInvalidate;
-    makeCapacityAtLeast</*kUseFreeList*/ true>(child, minCapacity,
-                                               allocators, impl);
+    freeAndMakeCapacityAtLeast</*kUseFreeList*/ true>(child, minCapacity,
+                                                      allocators, impl);
     if (update) {
       dontInvalidate = child;
     }
@@ -2342,6 +2348,7 @@ int64_t &maxVersion(Node *n, ConflictSet::Impl *impl) {
     return n256->children[index].childMaxVersion;
   }
   }
+  __builtin_unreachable(); // GCOVR_EXCL_LINE
 }
 
 Node *&getInTree(Node *n, ConflictSet::Impl *impl) {