4 Commits

Author SHA1 Message Date
b8f6a8edf2 sizeof(Node0) also needs to be < kBytesPerKey
All checks were successful
Tests / Release [gcc] total: 827, passed: 827
GNU C Compiler (gcc)

|Total|New|Outstanding|Fixed|Trend|
|:-:|:-:|:-:|:-:|:-:|
|0|0|0|0|:clap:|
Tests / Release [gcc,aarch64] total: 826, passed: 826
Tests / Coverage total: 825, passed: 825
weaselab/conflict-set/pipeline/head This commit looks good
Also remove vestigial comment.

CC #9
2024-03-11 18:21:00 -07:00
01f1d5850f Create a Node0 when splitting existing partial key 2024-03-11 18:20:14 -07:00
cd567383c3 Only keep the assumes that actually improve codegen 2024-03-10 14:34:55 -07:00
53a442abf9 Use the assume attribute for gcc 2024-03-10 14:22:47 -07:00

View File

@@ -47,9 +47,7 @@ limitations under the License.
#if __has_builtin(__builtin_assume) #if __has_builtin(__builtin_assume)
#define assume(e) __builtin_assume(e) #define assume(e) __builtin_assume(e)
#else #else
#define assume(e) \ #define assume(e) __attribute__((assume(e)))
if (!(e)) \
__builtin_unreachable()
#endif #endif
#else #else
#define assume assert #define assume assert
@@ -145,20 +143,20 @@ private:
}; };
bool BitSet::test(int i) const { bool BitSet::test(int i) const {
assume(0 <= i); assert(0 <= i);
assume(i < 256); assert(i < 256);
return words[i >> 6] & (uint64_t(1) << (i & 63)); return words[i >> 6] & (uint64_t(1) << (i & 63));
} }
void BitSet::set(int i) { void BitSet::set(int i) {
assume(0 <= i); assert(0 <= i);
assume(i < 256); assert(i < 256);
words[i >> 6] |= uint64_t(1) << (i & 63); words[i >> 6] |= uint64_t(1) << (i & 63);
} }
void BitSet::reset(int i) { void BitSet::reset(int i) {
assume(0 <= i); assert(0 <= i);
assume(i < 256); assert(i < 256);
words[i >> 6] &= ~(uint64_t(1) << (i & 63)); words[i >> 6] &= ~(uint64_t(1) << (i & 63));
} }
@@ -273,9 +271,8 @@ static_assert(sizeof(Node256) < kMinChildrenNode256 * kBytesPerKey);
static_assert(sizeof(Node48) < kMinChildrenNode48 * kBytesPerKey); static_assert(sizeof(Node48) < kMinChildrenNode48 * kBytesPerKey);
static_assert(sizeof(Node16) < kMinChildrenNode16 * kBytesPerKey); static_assert(sizeof(Node16) < kMinChildrenNode16 * kBytesPerKey);
static_assert(sizeof(Node4) < kMinChildrenNode4 * kBytesPerKey); static_assert(sizeof(Node4) < kMinChildrenNode4 * kBytesPerKey);
static_assert(sizeof(Node0) < kBytesPerKey);
// Bounds memory usage in free list, but does not account for memory for partial
// keys.
template <class T, int64_t kMemoryBound = (1 << 20), template <class T, int64_t kMemoryBound = (1 << 20),
int64_t kMaxIndividual = (1 << 10)> int64_t kMaxIndividual = (1 << 10)>
struct BoundedFreeListAllocator { struct BoundedFreeListAllocator {
@@ -958,7 +955,7 @@ struct SearchStepWise {
SearchStepWise() {} SearchStepWise() {}
SearchStepWise(Node *n, std::span<const uint8_t> remaining) SearchStepWise(Node *n, std::span<const uint8_t> remaining)
: n(n), remaining(remaining) { : n(n), remaining(remaining) {
assume(n->partialKeyLen == 0); assert(n->partialKeyLen == 0);
} }
bool step() { bool step() {
@@ -1582,7 +1579,7 @@ template <bool kBegin>
auto *old = *self; auto *old = *self;
int64_t oldMaxVersion = maxVersion(old, impl); int64_t oldMaxVersion = maxVersion(old, impl);
*self = allocators->node4.allocate(partialKeyIndex); *self = allocators->node0.allocate(partialKeyIndex);
memcpy((char *)*self + kNodeCopyBegin, (char *)old + kNodeCopyBegin, memcpy((char *)*self + kNodeCopyBegin, (char *)old + kNodeCopyBegin,
kNodeCopySize); kNodeCopySize);