Fix wrong order of arguments to update
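The old `update` signature placed the `int64_t version` between the index-like arguments, so a call with the arguments rotated still compiled via implicit conversions. This commit moves `version` to the last position, updates the call site shown in the rebalancing loop to match (and also flips the rotation direction passed to `rotate` there), and adds `assert(x == 0 || x >= kMinAddressable)` checks so a misordered call fails fast. Below is a minimal standalone sketch of the failure mode; it is not code from this repository, and `kMinAddressable` and all values in it are invented.

// Minimal standalone sketch of the failure mode -- not code from this
// repository; kMinAddressable and all values here are invented.
#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr uint32_t kMinAddressable = 4096; // hypothetical lower bound for node indices

// Old order: int64_t version sits between the index-like arguments, so a call
// with the arguments rotated still compiles via implicit conversions.
uint32_t updateOld(uint32_t node, int64_t version, bool which, uint32_t child) {
  (void)version; (void)which; (void)child;
  return node;
}

// New order from this commit: indices and direction first, version last, plus
// asserts that reject a small version smuggled in as a node index.
uint32_t updateNew(uint32_t node, bool which, uint32_t child, int64_t version) {
  assert(node == 0 || node >= kMinAddressable);
  assert(child == 0 || child >= kMinAddressable);
  (void)which; (void)version;
  return node;
}

int main() {
  uint32_t parent = 5000, node = 5001;
  bool direction = true;
  int64_t latestVersion = 7;

  // Misordered against the old signature: direction -> int64_t, node -> bool,
  // latestVersion -> uint32_t. Typically compiles without a diagnostic and is
  // silently wrong.
  uint32_t bad = updateOld(parent, direction, node, latestVersion);
  printf("old order, misordered call returned %u\n", bad);

  // Correct call against the new signature; a misordered one would trip the
  // kMinAddressable asserts immediately instead of corrupting the tree.
  uint32_t good = updateNew(parent, direction, node, latestVersion);
  printf("new order returned %u\n", good);
}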
@@ -15,6 +15,11 @@
 #define DEBUG_VERBOSE 0
 #endif
 
+#if DEBUG_VERBOSE
+// Use to toggle debug verbose dynamically
+bool debugVerboseEnabled = true;
+#endif
+
 void *mmapSafe(void *addr, size_t len, int prot, int flags, int fd,
                off_t offset) {
   void *result = mmap(addr, len, prot, flags, fd, offset);
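The new `debugVerboseEnabled` flag layers a runtime switch on top of the compile-time `DEBUG_VERBOSE` gate: when verbose builds are enabled, the prints can still be silenced or re-enabled on the fly, for example by flipping the bool from a debugger. A short sketch of the pattern as used in the hunks below; the `collectOne` function is invented for illustration, while the flag names match the diff.

// Sketch of the compile-time + runtime logging gate introduced above. The
// collectOne() function is invented for illustration.
#include <cstdio>

#define DEBUG_VERBOSE 1

#if DEBUG_VERBOSE
// Use to toggle debug verbose dynamically
bool debugVerboseEnabled = true;
#endif

void collectOne(unsigned i) {
#if DEBUG_VERBOSE
  if (debugVerboseEnabled) { // can be flipped at runtime, e.g. from a debugger
    printf("Collecting %u\n", i);
  }
#endif
  // ... actual collection work would go here ...
}

int main() {
  collectOne(1); // prints
#if DEBUG_VERBOSE
  debugVerboseEnabled = false;
#endif
  collectOne(2); // silent
}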
@@ -271,7 +276,9 @@ struct MemManager {
     for (int i = newFirstUnaddressable; i < firstUnaddressable; ++i) {
       if (base[i].entry != nullptr) {
 #if DEBUG_VERBOSE
-        printf("Collecting %u\n", i);
+        if (debugVerboseEnabled) {
+          printf("Collecting %u while shrinking right\n", i);
+        }
 #endif
         base[i].entry->delref();
       }
@@ -289,7 +296,9 @@ struct MemManager {
       [&](uint32_t i) {
         if (base[i].entry != nullptr) {
 #if DEBUG_VERBOSE
-          printf("Collecting %u\n", i);
+          if (debugVerboseEnabled) {
+            printf("Collecting %u while building free list\n", i);
+          }
 #endif
           base[i].entry->delref();
           base[i].entry = nullptr;
@@ -430,12 +439,15 @@ struct VersionedMap::Impl {
     static_assert(kOrder == std::memory_order_acquire ||
                   kOrder == std::memory_order_relaxed);
     auto &n = mm.base[node];
+    uint32_t result;
     if (n.updated.load(kOrder) && n.updateVersion <= at &&
         which == n.replacedPointer) {
-      return n.pointer[2];
+      result = n.pointer[2];
     } else {
-      return n.pointer[which];
+      result = n.pointer[which];
     }
+    assert(result == 0 || result >= kMinAddressable);
+    return result;
   }
 
   template <std::memory_order kOrder>
@@ -449,7 +461,9 @@ struct VersionedMap::Impl {
   }
 
   // Returns the node that results from setting `which` to `child` on `node`
-  uint32_t update(uint32_t node, int64_t version, bool which, uint32_t child) {
+  uint32_t update(uint32_t node, bool which, uint32_t child, int64_t version) {
+    assert(node == 0 || node >= kMinAddressable);
+    assert(child == 0 || child >= kMinAddressable);
     if (this->child<std::memory_order_relaxed>(node, which, version) == child) {
       return node;
     }
@@ -464,10 +478,13 @@ struct VersionedMap::Impl {
       c.pointer[!which] = n.pointer[!which];
       c.updated.store(false, std::memory_order_relaxed);
       c.updateVersion = version;
+      assert(copy == 0 || copy >= kMinAddressable);
       return copy;
     };
 
     if (n.updateVersion == version) {
+      // The reason these aren't data races is that concurrent readers are
+      // reading < `version`
      if (updated && n.replacedPointer != which) {
        // We can't update n.replacedPointer without introducing a data race
        // (unless we packed it into the atomic?) so we copy. pointer[2] becomes
@@ -479,6 +496,7 @@ struct VersionedMap::Impl {
       } else {
         n.pointer[which] = child;
       }
+      assert(node == 0 || node >= kMinAddressable);
       return node;
     }
 
@@ -490,6 +508,7 @@ struct VersionedMap::Impl {
       n.pointer[2] = child;
       n.replacedPointer = which;
       n.updated.store(true, std::memory_order_release); // Must be last
+      assert(node == 0 || node >= kMinAddressable);
       return node;
     }
   }
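The asserts above sit around the lock-free update protocol: the writer fills in the replacement fields and only then release-stores `updated` ("Must be last"), while readers acquire-load `updated` before trusting those fields. A simplified sketch of that publish/observe idiom follows; the node layout and function names are illustrative, not the project's actual ones.

// Simplified sketch of the release/acquire publication used by `updated`.
// The Node layout and these function names are illustrative only.
#include <atomic>
#include <cassert>
#include <cstdint>

struct Node {
  uint32_t pointer[3] = {0, 0, 0};
  int64_t updateVersion = 0;
  bool replacedPointer = false;
  std::atomic<bool> updated{false};
};

// Writer: record the replacement child in pointer[2], then publish it.
// The release store must come last so readers never observe `updated == true`
// without also seeing the fields written before it.
void publish(Node &n, bool which, uint32_t child, int64_t version) {
  n.pointer[2] = child;
  n.replacedPointer = which;
  n.updateVersion = version;
  n.updated.store(true, std::memory_order_release); // must be last
}

// Reader at version `at`: the acquire load synchronizes with the release
// store, so seeing the flag implies seeing a consistent replacement entry.
uint32_t childAt(const Node &n, bool which, int64_t at) {
  if (n.updated.load(std::memory_order_acquire) && n.updateVersion <= at &&
      which == n.replacedPointer) {
    return n.pointer[2];
  }
  return n.pointer[which];
}

int main() {
  Node n;
  n.pointer[0] = 4096; // pretend left child
  publish(n, /*which=*/false, /*child=*/8192, /*version=*/10);
  assert(childAt(n, false, 12) == 8192); // later reader sees the update
  assert(childAt(n, false, 5) == 4096);  // earlier reader does not
  assert(childAt(n, true, 12) == n.pointer[1]); // other side unaffected
}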
@@ -544,9 +563,9 @@ struct VersionedMap::Impl {
       finger.pop();
       auto parent = finger.back();
       const bool direction = directionStack[--directionStackSize];
-      parent = update(parent, latestVersion, direction, node);
+      parent = update(parent, direction, node, latestVersion);
       if (mm.base[node].entry->priority > mm.base[parent].entry->priority) {
-        rotate(parent, latestVersion, direction);
+        rotate(parent, latestVersion, !direction);
       } else {
         if (parent == finger.back()) {
           break;
@@ -610,7 +629,6 @@ struct VersionedMap::Impl {
 #include <nanobench.h>
 
 int main() {
-
   {
     weaselab::VersionedMap::Impl impl;
     impl.latestRoot = impl.roots.roots()[impl.roots.rootCount() - 1];
@@ -630,7 +648,8 @@ int main() {
   }
 
   ankerl::nanobench::Bench bench;
-  bench.minEpochIterations(5000);
+  bench.minEpochIterations(10000);
+
   weaselab::MemManager mm;
   bench.run("allocate", [&]() {
     auto x = mm.allocate();
@@ -670,5 +689,29 @@ int main() {
       bench.doNotOptimizeAway(roots.rootForVersion(i - kNumVersions / 2));
     });
   }
+
+  {
+    weaselab::VersionedMap::Impl impl;
+    bench.run("insert fixed entry", [&]() {
+      ++impl.latestVersion;
+      impl.latestRoot = impl.roots.roots()[impl.roots.rootCount() - 1];
+      impl.insert(weaselab::VersionedMap::Mutation{
+          (const uint8_t *)"a", nullptr, 1, 0, weaselab::VersionedMap::Set});
+      impl.roots.add(impl.latestRoot, impl.latestVersion);
+    });
+  }
+
+  {
+    weaselab::VersionedMap::Impl impl;
+    bench.run("insert fresh entry", [&]() {
+      ++impl.latestVersion;
+      impl.latestRoot = impl.roots.roots()[impl.roots.rootCount() - 1];
+      auto k = __builtin_bswap64(impl.latestVersion);
+      impl.insert(weaselab::VersionedMap::Mutation{
+          (const uint8_t *)&k, nullptr, sizeof(k), 0,
+          weaselab::VersionedMap::Set});
+      impl.roots.add(impl.latestRoot, impl.latestVersion);
+    });
+  }
 }
 #endif