3 Commits

Author SHA1 Message Date
44a023c2f4 Bound individual size of allocation to put in free list
2024-03-08 22:41:18 -08:00
e32bea7b29 Enforce free list memory bound by tracking bytes directly 2024-03-08 22:30:38 -08:00
504a93bb10 Track partialKeyCapacity
If we use partialKeyLen, then the difference between partialKeyCapacity
and partialKeyLen will slowly grow. We have 3 padding bytes in Node now.
2024-03-08 21:42:26 -08:00


@@ -174,18 +174,16 @@ struct Node {
   Node *parent = nullptr;
   Entry entry;
   int32_t partialKeyLen = 0;
-  int16_t numChildren : 15 = 0;
-  bool entryPresent : 1 = false;
+  int16_t numChildren = 0;
+  bool entryPresent = false;
   uint8_t parentsIndex = 0;
   /* end section that's copied to the next node */
   Type type;
-#ifndef NDEBUG
-  // Leaving this uninitialized is intentional and necessary to expect asserts
-  // to pass. Basically it needs to be preserved when going to the free list and
-  // back.
+  // Leaving this uninitialized is intentional and necessary for correctness.
+  // Basically it needs to be preserved when going to the free list and back.
   int32_t partialKeyCapacity;
-#endif
   uint8_t *partialKey();
 };
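This struct change corresponds to the "Track partialKeyCapacity" commit above: the bitfields become whole fields and partialKeyCapacity is kept unconditionally, so the free-list filter no longer has to rely on the previous occupant's partialKeyLen (which can only shrink across reuses, so the capacity/length gap would otherwise keep growing). The commit message's "3 padding bytes" claim can be sanity-checked with a layout sketch; Entry and Type below are invented placeholders, so the offsets are assumptions rather than the library's real sizes.

```cpp
#include <cstddef>
#include <cstdint>

// Placeholder stand-ins for types defined elsewhere in the real header.
struct Entry { int64_t v0 = 0; int64_t v1 = 0; };
enum class Type : uint8_t {};

struct Node {
  Node *parent = nullptr;
  Entry entry;
  int32_t partialKeyLen = 0;
  int16_t numChildren = 0;
  bool entryPresent = false;
  uint8_t parentsIndex = 0;
  Type type;
  int32_t partialKeyCapacity; // intentionally uninitialized; preserved by the free list
};

// With these placeholder sizes, `type` ends at offset 33 and the 4-byte-aligned
// partialKeyCapacity starts at offset 36, leaving the three padding bytes the
// commit message mentions.
static_assert(offsetof(Node, partialKeyCapacity) -
                  (offsetof(Node, type) + sizeof(Type)) == 3);
```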
@@ -262,7 +260,8 @@ static_assert(sizeof(Node4) < kMinChildrenNode4 * kBytesPerKey);
 // Bounds memory usage in free list, but does not account for memory for partial
 // keys.
-template <class T, size_t kMemoryBound = (1 << 20)>
+template <class T, int64_t kMemoryBound = (1 << 20),
+          int64_t kMaxIndividual = (1 << 10)>
 struct BoundedFreeListAllocator {
   static_assert(sizeof(T) >= sizeof(void *));
   static_assert(std::derived_from<T, Node>);
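At an instantiation site the two parameters read as a total byte budget for the free list and a per-allocation retention cap. The usage below is illustrative only; the variable names are guesses and not taken from the repository:

```cpp
// Defaults: retain at most ~1 MiB of nodes on the free list, and never retain
// a single allocation (node plus partial key) larger than 1 KiB.
BoundedFreeListAllocator<Node4> node4Allocator;

// Both bounds can be overridden per instantiation.
BoundedFreeListAllocator<Node4, /*kMemoryBound=*/4 << 20,
                         /*kMaxIndividual=*/4 << 10> bigBudgetAllocator;
```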
@@ -274,24 +273,22 @@ struct BoundedFreeListAllocator {
 #endif
     if (freeList != nullptr) {
       T *n = (T *)freeList;
-      VALGRIND_MAKE_MEM_DEFINED(n, sizeof(T));
-      if (n->partialKeyLen >= partialKeyCapacity) {
-        memcpy(&freeList, freeList, sizeof(freeList));
-        --freeListSize;
-        VALGRIND_MAKE_MEM_UNDEFINED(n, sizeof(T));
-#ifndef NDEBUG
-        VALGRIND_MAKE_MEM_DEFINED(&n->partialKeyCapacity,
-                                  sizeof(n->partialKeyCapacity));
-#endif
+      VALGRIND_MAKE_MEM_UNDEFINED(n, sizeof(T));
+      VALGRIND_MAKE_MEM_DEFINED(&n->partialKeyCapacity,
+                                sizeof(n->partialKeyCapacity));
+      VALGRIND_MAKE_MEM_DEFINED(freeList, sizeof(freeList));
+      memcpy(&freeList, freeList, sizeof(freeList));
+      freeListBytes -= sizeof(T) + n->partialKeyCapacity;
+      if (n->partialKeyCapacity >= partialKeyCapacity) {
         return new (n) T;
-      } else {
-        // The intent is to filter out too-small nodes in the freelist
-        free(n);
       }
+      VALGRIND_MAKE_MEM_NOACCESS(n, sizeof(T));
     }
     auto *result = new (safe_malloc(sizeof(T) + partialKeyCapacity)) T;
-#ifndef NDEBUG
     result->partialKeyCapacity = partialKeyCapacity;
-#endif
     return result;
   }
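Both the allocation and release paths rely on an intrusive free list: the first sizeof(void *) bytes of a parked node are overwritten with the pointer to the next free node, which is why the allocator asserts sizeof(T) >= sizeof(void *). A minimal standalone sketch of that trick, with invented names and without the Valgrind annotations:

```cpp
#include <cstdlib>
#include <cstring>
#include <new>

struct Slot { long payload[4]; };  // any trivially destructible type with sizeof >= sizeof(void *)

void *freeListHead = nullptr;

void releaseSlot(Slot *p) {
  // Store the current head inside the released slot, then make the slot the new head.
  std::memcpy((void *)p, &freeListHead, sizeof(freeListHead));
  freeListHead = p;
}

Slot *acquireSlot() {
  if (freeListHead != nullptr) {
    Slot *n = (Slot *)freeListHead;
    // Read the embedded next pointer back out to pop the head, then reuse the memory in place.
    std::memcpy(&freeListHead, freeListHead, sizeof(freeListHead));
    return new (n) Slot;
  }
  // Free list empty: fall back to a fresh allocation (error handling omitted).
  return new (std::malloc(sizeof(Slot))) Slot;
}
```

In the real allocator, VALGRIND_MAKE_MEM_NOACCESS protects parked nodes while they sit on the list, and the reuse path re-marks the node undefined except for partialKeyCapacity, the one field that must survive the round trip.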
@@ -300,12 +297,13 @@ struct BoundedFreeListAllocator {
     --liveAllocations;
 #endif
     static_assert(std::is_trivially_destructible_v<T>);
-    if (freeListSize == kMaxFreeListSize) {
+    if (sizeof(T) + p->partialKeyCapacity > kMaxIndividual ||
+        freeListBytes >= kMemoryBound) {
       return free(p);
     }
     memcpy((void *)p, &freeList, sizeof(freeList));
     freeList = p;
-    ++freeListSize;
+    freeListBytes += sizeof(T) + p->partialKeyCapacity;
     VALGRIND_MAKE_MEM_NOACCESS(freeList, sizeof(T));
   }
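Stripped of the Valgrind annotation and the SHOW_MEMORY bookkeeping, the retention policy in this hunk reduces to the sketch below; `release` is a placeholder name, since the enclosing signature isn't visible in the hunk:

```cpp
void release(T *p) {
  static_assert(std::is_trivially_destructible_v<T>);
  int64_t bytes = sizeof(T) + p->partialKeyCapacity;
  // Retain the node only if it is individually small enough and the list is
  // still under its total byte budget; otherwise hand it straight back to free().
  if (bytes > kMaxIndividual || freeListBytes >= kMemoryBound) {
    free(p);
    return;
  }
  std::memcpy((void *)p, &freeList, sizeof(freeList)); // link into the free list
  freeList = p;
  freeListBytes += bytes;
}
```

Because the byte check runs before the node is linked in, freeListBytes can exceed kMemoryBound by at most one retained allocation, i.e. by less than kMaxIndividual bytes.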
@@ -323,10 +321,10 @@ struct BoundedFreeListAllocator {
 #endif
 private:
-  static constexpr int kMaxFreeListSize = kMemoryBound / sizeof(T);
-  int freeListSize = 0;
+  int64_t freeListBytes = 0;
   void *freeList = nullptr;
 #if SHOW_MEMORY
+  // TODO Track partial key bytes
   int64_t maxLiveAllocations = 0;
   int64_t liveAllocations = 0;
 #endif
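The only accounting state left is freeListBytes plus the list head, so the invariant these commits establish is simple: freeListBytes equals the sum of sizeof(T) + partialKeyCapacity over the parked nodes. A hypothetical debug walk (not in the repository) could check it; under Valgrind each node would first need to be re-marked readable, which is omitted here:

```cpp
#include <cstdint>
#include <cstring>

// Hypothetical consistency check for the allocator's byte accounting.
// T is assumed to expose partialKeyCapacity, like the Node types above.
template <class T>
int64_t countFreeListBytes(void *head) {
  int64_t total = 0;
  for (void *cursor = head; cursor != nullptr;) {
    T *n = (T *)cursor;
    total += (int64_t)sizeof(T) + n->partialKeyCapacity;
    std::memcpy(&cursor, cursor, sizeof(cursor)); // follow the embedded next pointer
  }
  return total;
}
```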