diff --git a/ConflictSet.cpp b/ConflictSet.cpp
index d9f1889..58b2b7d 100644
--- a/ConflictSet.cpp
+++ b/ConflictSet.cpp
@@ -260,7 +260,7 @@ static_assert(sizeof(Node4) < kMinChildrenNode4 * kBytesPerKey);
 
 // Bounds memory usage in free list, but does not account for memory for partial
 // keys.
-template <class T, int kMemoryBound>
+template <class T, int64_t kMemoryBound>
 struct BoundedFreeListAllocator {
   static_assert(sizeof(T) >= sizeof(void *));
   static_assert(std::derived_from<T, Node>);
@@ -275,12 +275,15 @@ struct BoundedFreeListAllocator {
       VALGRIND_MAKE_MEM_UNDEFINED(n, sizeof(T));
       VALGRIND_MAKE_MEM_DEFINED(&n->partialKeyCapacity,
                                 sizeof(n->partialKeyCapacity));
+      VALGRIND_MAKE_MEM_DEFINED(freeList, sizeof(freeList));
+      memcpy(&freeList, freeList, sizeof(freeList));
+      freeListBytes -= sizeof(T) + n->partialKeyCapacity;
       if (n->partialKeyCapacity >= partialKeyCapacity) {
-        memcpy(&freeList, freeList, sizeof(freeList));
-        --freeListSize;
         return new (n) T;
+      } else {
+        // The intent is to filter out too-small nodes in the freelist
+        free(n);
       }
-      VALGRIND_MAKE_MEM_NOACCESS(n, sizeof(T));
     }
     auto *result = new (safe_malloc(sizeof(T) + partialKeyCapacity)) T;
 
@@ -293,12 +296,12 @@
     --liveAllocations;
 #endif
     static_assert(std::is_trivially_destructible_v<T>);
-    if (freeListSize == kMaxFreeListSize) {
+    if (freeListBytes >= kMemoryBound) {
       return free(p);
     }
     memcpy((void *)p, &freeList, sizeof(freeList));
     freeList = p;
-    ++freeListSize;
+    freeListBytes += sizeof(T) + p->partialKeyCapacity;
     VALGRIND_MAKE_MEM_NOACCESS(freeList, sizeof(T));
   }
 
@@ -316,10 +319,10 @@
 #endif
 
 private:
-  static constexpr int kMaxFreeListSize = kMemoryBound / sizeof(T);
-  int freeListSize = 0;
+  int64_t freeListBytes = 0;
   void *freeList = nullptr;
 #if SHOW_MEMORY
+  // TODO Track partial key bytes
   int64_t maxLiveAllocations = 0;
   int64_t liveAllocations = 0;
 #endif