weaseldb/benchmarks/bench_metric.cpp

#include <nanobench.h>
#include "arena_allocator.hpp"
#include "metric.hpp"
#include <atomic>
#include <cmath>
#include <latch>
#include <memory>
#include <random>
#include <string>
#include <thread>
#include <vector>
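// Process-global metric families: render() takes no registry argument, so
// every benchmark below records into the same shared registry.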
metric::Family<metric::Gauge> gauge_family = metric::create_gauge("gauge", "");
metric::Family<metric::Counter> counter_family =
metric::create_counter("counter", "");
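// exponential_buckets(0.001, 5, 8) presumably takes (start, factor, count):
// 8 buckets growing by 5x from 1 ms (0.001 s up to ~78 s).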
metric::Family<metric::Histogram> histogram_family = metric::create_histogram(
"histogram", "", metric::exponential_buckets(0.001, 5, 8));
// High-contention benchmark setup
struct ContentionEnvironment {
// Background threads for contention
std::vector<std::thread> background_threads;
std::atomic<bool> stop_flag{false};
// Synchronization latches - must be members to avoid use-after-return
std::unique_ptr<std::latch> contention_latch;
std::unique_ptr<std::latch> render_latch;
ContentionEnvironment() = default;
void start_background_contention(int num_threads = 4) {
stop_flag.store(false);
contention_latch = std::make_unique<std::latch>(num_threads + 1);
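// +1 so the calling thread can arrive below and release all workers at once.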
for (int i = 0; i < num_threads; ++i) {
background_threads.emplace_back([this, i]() {
auto bg_counter = counter_family.create({});
auto bg_gauge = gauge_family.create({});
auto bg_histogram = histogram_family.create({});
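// Created with the same empty label set as the handles benchmarked in main(),
// so these presumably resolve to the same underlying series and contend
// directly with the measured operations.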
std::mt19937 rng(i);
std::uniform_real_distribution<double> dist(0.0, 10.0);
contention_latch
->arrive_and_wait(); // All background threads start together
while (!stop_flag.load(std::memory_order_relaxed)) {
// Simulate mixed workload
bg_counter.inc(1.0);
bg_gauge.set(dist(rng));
bg_gauge.inc(1.0);
bg_histogram.observe(dist(rng));
}
});
}
contention_latch
->arrive_and_wait(); // Wait for all background threads to be ready
}
void start_render_thread() {
render_latch = std::make_unique<std::latch>(2);
background_threads.emplace_back([this]() {
ArenaAllocator arena;
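// One arena per render thread; reset() is assumed to reclaim its memory in
// bulk so every render call reuses the same buffer.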
render_latch->arrive_and_wait(); // Render thread signals it's ready
while (!stop_flag.load(std::memory_order_relaxed)) {
auto output = metric::render(arena);
static_cast<void>(output); // Suppress unused variable warning
arena.reset();
}
});
render_latch->arrive_and_wait(); // Wait for render thread to be ready
}
void stop_background_threads() {
stop_flag.store(true);
for (auto &t : background_threads) {
if (t.joinable()) {
t.join();
}
}
background_threads.clear();
}
~ContentionEnvironment() { stop_background_threads(); }
};
int main() {
ankerl::nanobench::Bench bench;
bench.title("WeaselDB Metrics Performance").unit("operation").warmup(1000);
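// warmup(1000) runs 1000 unmeasured iterations of each benchmark before
// nanobench starts timing.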
auto counter = counter_family.create({});
auto gauge = gauge_family.create({});
auto histogram = histogram_family.create({});
// Baseline performance without contention
{
bench.run("counter.inc() - no contention", [&]() {
counter.inc(1.0);
ankerl::nanobench::doNotOptimizeAway(counter);
});
bench.run("gauge.inc() - no contention", [&]() {
gauge.inc(1.0);
ankerl::nanobench::doNotOptimizeAway(gauge);
});
bench.run("gauge.set() - no contention", [&]() {
gauge.set(42.0);
ankerl::nanobench::doNotOptimizeAway(gauge);
});
bench.run("histogram.observe() - no contention", [&]() {
histogram.observe(0.5);
ankerl::nanobench::doNotOptimizeAway(histogram);
});
}
// High contention with background threads
{
ContentionEnvironment env;
// Start background threads creating contention
env.start_background_contention(8);
bench.run("counter.inc() - 8 background threads",
[&]() { counter.inc(1.0); });
bench.run("gauge.inc() - 8 background threads", [&]() { gauge.inc(1.0); });
bench.run("gauge.set() - 8 background threads", [&]() { gauge.set(42.0); });
bench.run("histogram.observe() - 8 background threads",
[&]() { histogram.observe(1.5); });
}
// Concurrent render contention
{
ContentionEnvironment env;
// Start background threads + render thread
env.start_background_contention(4);
env.start_render_thread();
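// Foreground updates now race against both the contention threads and a
// thread that repeatedly renders the entire registry.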
bench.run("counter.inc() - with concurrent render",
[&]() { counter.inc(1.0); });
bench.run("gauge.inc() - with concurrent render",
[&]() { gauge.inc(1.0); });
bench.run("histogram.observe() - with concurrent render",
[&]() { histogram.observe(2.0); });
}
// Render performance scaling
{
// Test render performance as number of metrics increases
std::vector<metric::Counter> counters;
std::vector<metric::Gauge> gauges;
std::vector<metric::Histogram> histograms;
auto counter_family =
metric::create_counter("scale_counter", "Scale counter");
auto gauge_family = metric::create_gauge("scale_gauge", "Scale gauge");
auto histogram_family = metric::create_histogram(
"scale_histogram", "Scale histogram",
std::initializer_list<double>{0.1, 0.5, 1.0, 2.5, 5.0, 10.0, 25.0,
50.0});
// Create varying numbers of metrics
for (int scale : {10, 100, 1000}) {
// The families above are created once, so series accumulate across scales;
// create() with already-seen labels is assumed to return the existing
// series, leaving `scale` unique series per type for this iteration.
// (Note: In real usage, metrics persist for the application lifetime)
for (int i = 0; i < scale; ++i) {
counters.emplace_back(
counter_family.create({{"id", std::to_string(i)}}));
gauges.emplace_back(gauge_family.create({{"id", std::to_string(i)}}));
histograms.emplace_back(
histogram_family.create({{"id", std::to_string(i)}}));
// Set some values
counters.back().inc(static_cast<double>(i));
gauges.back().set(static_cast<double>(i * 2));
histograms.back().observe(static_cast<double>(i) * 0.1);
}
ArenaAllocator arena;
std::string bench_name =
"render() - " + std::to_string(scale) + " metrics each type";
bench.run(bench_name, [&]() {
auto output = metric::render(arena);
ankerl::nanobench::doNotOptimizeAway(output);
arena.reset();
});
}
}
// Callback metrics performance
{
auto counter_family =
metric::create_counter("callback_counter", "Callback counter");
auto gauge_family =
metric::create_gauge("callback_gauge", "Callback gauge");
std::atomic<double> counter_value{0};
std::atomic<double> gauge_value{100};
// Register callbacks
counter_family.register_callback(
{{"type", "callback"}}, [&counter_value]() {
return counter_value.load(std::memory_order_relaxed);
});
gauge_family.register_callback({{"type", "callback"}}, [&gauge_value]() {
return gauge_value.load(std::memory_order_relaxed);
});
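// With callbacks registered, render() is expected to invoke these lambdas to
// sample the current values rather than read stored series.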
// Background thread updating callback values
std::atomic<bool> stop_callback{false};
std::latch start_latch{2}; // Background thread + benchmark thread
std::thread callback_updater([&]() {
start_latch.arrive_and_wait(); // Wait for benchmark to start
while (!stop_callback.load()) {
counter_value.fetch_add(1);
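// The plain load+store below is not an atomic increment, but this thread is
// the only writer, so no updates are lost.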
gauge_value.store(gauge_value.load() + 1);
}
});
ArenaAllocator arena;
start_latch.arrive_and_wait(); // Wait for background thread to be ready
bench.run("render() - with callback metrics", [&]() {
auto output = metric::render(arena);
ankerl::nanobench::doNotOptimizeAway(output);
arena.reset();
});
stop_callback.store(true);
callback_updater.join();
}
return 0;
}