Fix an issue with the versioned-only benchmark

Previously it was reading some of the keys from unversioned storage, so the benchmark was not exercising only the versioned map.

Also add some more benchmarks (an iterBench helper for iterator micro-benchmarks, plus forward/reverse range-read benchmarks).
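The added benchmarks all follow the same nanobench pattern visible in the diff: configure a Bench with minEpochIterations() or batch(), call run() with a lambda, and use doNotOptimizeAway() to keep the measured value alive. Below is a minimal, self-contained sketch of that pattern; it iterates a plain std::map as a stand-in for the project's Facade/versioned map, and the container, key count, and benchmark names are illustrative assumptions, not part of this commit.

// Minimal sketch of the nanobench measurement pattern used by the new
// benchmarks. std::map is only a stand-in for the project's versioned map;
// the names and sizes below are illustrative, not taken from this commit.
#define ANKERL_NANOBENCH_IMPLEMENT // single-header usage: one TU provides the implementation
#include <cstdint>
#include <map>
#include <nanobench.h>

int main() {
  std::map<int64_t, int64_t> m;
  for (int64_t i = 0; i < 1000; ++i) {
    m[i] = i;
  }

  ankerl::nanobench::Bench bench;
  bench.minEpochIterations(10000);

  // Per-step cost of advancing an iterator, wrapping around so the measured
  // body never runs off the end of the container.
  auto iter = m.begin();
  bench.run("++iter (std::map)", [&] {
    ++iter;
    if (iter == m.end()) {
      iter = m.begin();
    }
  });

  // batch() divides the reported time by the element count, so a full scan
  // is reported as time per key rather than time per scan.
  bench.batch(m.size());
  bench.run("full scan (std::map)", [&] {
    int64_t sum = 0;
    for (const auto &[k, v] : m) {
      sum += v;
    }
    bench.doNotOptimizeAway(sum);
  });
}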
2024-06-19 15:08:16 -07:00
parent 39273424c1
commit 57cceaf3b7

Bench.cpp

@@ -3,6 +3,34 @@
 #include <nanobench.h>
+void iterBench(const Facade &facade, int64_t version,
+               const std::string &context) {
+  ankerl::nanobench::Bench bench;
+  bench.minEpochIterations(10000);
+  const auto begin = facade.versioned.begin(version);
+  const auto end = facade.versioned.end(version);
+  auto iter = begin;
+  bench.run("*iter (" + context + ")", [&] { bench.doNotOptimizeAway(*iter); });
+  iter = begin;
+  bench.run("++iter (" + context + ")", [&] {
+    ++iter;
+    if (iter == end) {
+      iter = begin;
+    }
+  });
+  iter = end;
+  bench.run("--iter (" + context + ")", [&] {
+    --iter;
+    if (iter == begin) {
+      iter = end;
+    }
+  });
+}
 void monotonicallyIncreasing() {
   constexpr int kWindow = 1000;
   ankerl::nanobench::Bench bench;
@@ -25,27 +53,7 @@ void monotonicallyIncreasing() {
   });
   const auto v = facade.getVersion() - kWindow / 2;
-  const auto begin = facade.versioned.begin(v);
-  const auto end = facade.versioned.end(v);
-  auto iter = begin;
-  bench.run("*iter", [&] { bench.doNotOptimizeAway(*iter); });
-  iter = begin;
-  bench.run("++iter", [&] {
-    ++iter;
-    if (iter == end) {
-      iter = begin;
-    }
-  });
-  iter = end;
-  bench.run("--iter", [&] {
-    --iter;
-    if (iter == begin) {
-      iter = end;
-    }
-  });
+  iterBench(facade, v, "monotonically increasing");
   bench.run("begin", [&] { facade.versioned.begin(v); });
@@ -56,6 +64,27 @@ void monotonicallyIncreasing() {
   });
   bench.run("end", [&] { facade.versioned.end(v); });
+  {
+    ankerl::nanobench::Bench bench;
+    bench.batch(kWindow);
+    bench.run("Facade monotonically-increasing read forward", [&]() {
+      if (facade.viewAt(facade.getVersion())
+              .rangeRead(String(), String({0xff}), kWindow, false)
+              .size() != kWindow) {
+        abort();
+      }
+    });
+    bench.run("Facade monotonically-increasing read reverse", [&]() {
+      if (facade.viewAt(facade.getVersion())
+              .rangeRead(String(), String({0xff}), kWindow, true)
+              .size() != kWindow) {
+        abort();
+      }
+    });
+  }
 }
 void bulkFirstGeq() {
@@ -105,7 +134,7 @@ void bulkFirstGeq() {
 void facadeVersionedOnlyRead() {
   Facade facade{0};
-  constexpr int kNumKeys = 10000;
+  constexpr int kNumKeys = 1000;
   ankerl::nanobench::Bench bench;
   bench.batch(kNumKeys);
@@ -125,35 +154,39 @@ void facadeVersionedOnlyRead() {
                       facade.getVersion() + 1);
   }
-  // Add keys
+  Arena arena;
+  weaselab::VersionedMap::Mutation *mutations =
+      new (arena) weaselab::VersionedMap::Mutation[kNumKeys];
   for (int i = 0; i < kNumKeys; ++i) {
     const int64_t k = __builtin_bswap64(i);
-    weaselab::VersionedMap::Mutation mutations[] = {
-        {(const uint8_t *)&k, 8, (const uint8_t *)&k, 8,
-         weaselab::VersionedMap::Set},
-    };
-    facade.addMutations(mutations, sizeof(mutations) / sizeof(mutations[0]),
-                        facade.getVersion() + 1);
+    uint8_t *buf = new (arena) uint8_t[8];
+    memcpy(buf, &k, 8);
+    mutations[i] = {buf, 8, buf, 8, weaselab::VersionedMap::Set};
   }
+  // Add keys
+  facade.addMutations(mutations, kNumKeys, facade.getVersion() + 1);
   // Populate the unversioned map
-  for (int i = 0; i < kNumKeys; ++i) {
-    const int64_t k = __builtin_bswap64(i);
-    weaselab::VersionedMap::Mutation mutations[] = {
-        {(const uint8_t *)&k, 8, (const uint8_t *)&k, 8,
-         weaselab::VersionedMap::Set},
-    };
-    facade.addMutations(mutations, sizeof(mutations) / sizeof(mutations[0]),
-                        facade.getVersion() + 1);
-  }
+  facade.addMutations(mutations, kNumKeys, facade.getVersion() + 1);
+  facade.setOldestVersion(facade.getVersion() - 1, /*force*/ true);
+  iterBench(facade, facade.getVersion(), "adjacent sets/clears");
   bench.run("Facade versioned-only read forward", [&]() {
-    facade.viewAt(facade.getVersion()).rangeRead(begin, end, kNumKeys, false);
+    if (facade.viewAt(facade.getVersion())
+            .rangeRead(begin, end, kNumKeys, false)
+            .size() != kNumKeys) {
+      abort();
+    }
   });
   bench.run("Facade versioned-only read reverse", [&]() {
-    facade.viewAt(facade.getVersion()).rangeRead(begin, end, kNumKeys, true);
+    if (facade.viewAt(facade.getVersion())
+            .rangeRead(begin, end, kNumKeys, true)
+            .size() != kNumKeys) {
+      abort();
+    }
   });
 }