3 Commits

Author SHA1 Message Date
44a023c2f4 Bound individual size of allocation to put in free list
All checks were successful
Tests / Release [gcc] total: 827, passed: 827
GNU C Compiler (gcc)

|Total|New|Outstanding|Fixed|Trend|
|:-:|:-:|:-:|:-:|:-:|
|0|0|0|0|:clap:|
Tests / Release [gcc,aarch64] total: 826, passed: 826
Tests / Coverage total: 825, passed: 825
weaselab/conflict-set/pipeline/head This commit looks good
2024-03-08 22:41:18 -08:00
e32bea7b29 Enforce free list memory bound by tracking bytes directly 2024-03-08 22:30:38 -08:00
504a93bb10 Track partialKeyCapacity
If we use partialKeyLen, then the difference between partialKeyCapacity
and partialKeyLen will slowly grow. We have 3 padding bytes in Node now.
2024-03-08 21:42:26 -08:00

View File

@@ -174,18 +174,16 @@ struct Node {
Node *parent = nullptr;
Entry entry;
int32_t partialKeyLen = 0;
int16_t numChildren : 15 = 0;
bool entryPresent : 1 = false;
int16_t numChildren = 0;
bool entryPresent = false;
uint8_t parentsIndex = 0;
/* end section that's copied to the next node */
Type type;
#ifndef NDEBUG
// Leaving this uninitialized is intentional and necessary to expect asserts
// to pass. Basically it needs to be preserved when going to the free list and
// back.
// Leaving this uninitialized is intentional and necessary for correctness.
// Basically it needs to be preserved when going to the free list and back.
int32_t partialKeyCapacity;
#endif
uint8_t *partialKey();
};
@@ -262,7 +260,8 @@ static_assert(sizeof(Node4) < kMinChildrenNode4 * kBytesPerKey);
// Bounds memory usage in free list, but does not account for memory for partial
// keys.
template <class T, size_t kMemoryBound = (1 << 20)>
template <class T, int64_t kMemoryBound = (1 << 20),
int64_t kMaxIndividual = (1 << 10)>
struct BoundedFreeListAllocator {
static_assert(sizeof(T) >= sizeof(void *));
static_assert(std::derived_from<T, Node>);
@@ -274,24 +273,22 @@ struct BoundedFreeListAllocator {
#endif
if (freeList != nullptr) {
T *n = (T *)freeList;
VALGRIND_MAKE_MEM_DEFINED(n, sizeof(T));
if (n->partialKeyLen >= partialKeyCapacity) {
memcpy(&freeList, freeList, sizeof(freeList));
--freeListSize;
VALGRIND_MAKE_MEM_UNDEFINED(n, sizeof(T));
#ifndef NDEBUG
VALGRIND_MAKE_MEM_DEFINED(&n->partialKeyCapacity,
sizeof(n->partialKeyCapacity));
#endif
VALGRIND_MAKE_MEM_DEFINED(freeList, sizeof(freeList));
memcpy(&freeList, freeList, sizeof(freeList));
freeListBytes -= sizeof(T) + n->partialKeyCapacity;
if (n->partialKeyCapacity >= partialKeyCapacity) {
return new (n) T;
} else {
// The intent is to filter out too-small nodes in the freelist
free(n);
}
VALGRIND_MAKE_MEM_NOACCESS(n, sizeof(T));
}
auto *result = new (safe_malloc(sizeof(T) + partialKeyCapacity)) T;
#ifndef NDEBUG
result->partialKeyCapacity = partialKeyCapacity;
#endif
return result;
}
@@ -300,12 +297,13 @@ struct BoundedFreeListAllocator {
--liveAllocations;
#endif
static_assert(std::is_trivially_destructible_v<T>);
if (freeListSize == kMaxFreeListSize) {
if (sizeof(T) + p->partialKeyCapacity > kMaxIndividual ||
freeListBytes >= kMemoryBound) {
return free(p);
}
memcpy((void *)p, &freeList, sizeof(freeList));
freeList = p;
++freeListSize;
freeListBytes += sizeof(T) + p->partialKeyCapacity;
VALGRIND_MAKE_MEM_NOACCESS(freeList, sizeof(T));
}
@@ -323,10 +321,10 @@ struct BoundedFreeListAllocator {
#endif
private:
static constexpr int kMaxFreeListSize = kMemoryBound / sizeof(T);
int freeListSize = 0;
int64_t freeListBytes = 0;
void *freeList = nullptr;
#if SHOW_MEMORY
// TODO Track partial key bytes
int64_t maxLiveAllocations = 0;
int64_t liveAllocations = 0;
#endif