#include <nanobench.h>

#include "arena_allocator.hpp"
#include "metric.hpp"

#include <atomic>
#include <initializer_list>
#include <latch>
#include <memory>
#include <random>
#include <string>
#include <thread>
#include <vector>

metric::Family gauge_family = metric::create_gauge("gauge", "");
metric::Family counter_family = metric::create_counter("counter", "");
metric::Family histogram_family = metric::create_histogram(
    "histogram", "", metric::exponential_buckets(0.001, 5, 8));

// High-contention benchmark setup
struct ContentionEnvironment {
  // Background threads for contention
  std::vector<std::thread> background_threads;
  std::atomic<bool> stop_flag{false};

  // Synchronization latches - must be members to avoid use-after-return
  std::unique_ptr<std::latch> contention_latch;
  std::unique_ptr<std::latch> render_latch;

  ContentionEnvironment() = default;

  void start_background_contention(int num_threads = 4) {
    stop_flag.store(false);
    contention_latch = std::make_unique<std::latch>(num_threads + 1);

    for (int i = 0; i < num_threads; ++i) {
      background_threads.emplace_back([this, i]() {
        auto bg_counter = counter_family.create({});
        auto bg_gauge = gauge_family.create({});
        auto bg_histogram = histogram_family.create({});

        std::mt19937 rng(i);
        std::uniform_real_distribution<double> dist(0.0, 10.0);

        // All background threads start together
        contention_latch->arrive_and_wait();

        while (!stop_flag.load(std::memory_order_relaxed)) {
          // Simulate mixed workload
          bg_counter.inc(1.0);
          bg_gauge.set(dist(rng));
          bg_gauge.inc(1.0);
          bg_histogram.observe(dist(rng));
        }
      });
    }

    // Wait for all background threads to be ready
    contention_latch->arrive_and_wait();
  }

  void start_render_thread() {
    render_latch = std::make_unique<std::latch>(2);
    background_threads.emplace_back([this]() {
      ArenaAllocator arena;
      render_latch->arrive_and_wait(); // Render thread signals it's ready
      while (!stop_flag.load(std::memory_order_relaxed)) {
        auto output = metric::render(arena);
        static_cast<void>(output); // Suppress unused variable warning
        arena.reset();
      }
    });
    render_latch->arrive_and_wait(); // Wait for render thread to be ready
  }

  void stop_background_threads() {
    stop_flag.store(true);
    for (auto &t : background_threads) {
      if (t.joinable()) {
        t.join();
      }
    }
    background_threads.clear();
  }

  ~ContentionEnvironment() { stop_background_threads(); }
};

int main() {
  ankerl::nanobench::Bench bench;
  bench.title("WeaselDB Metrics Performance").unit("operation").warmup(1000);

  auto counter = counter_family.create({});
  auto gauge = gauge_family.create({});
  auto histogram = histogram_family.create({});

  // Baseline performance without contention
  {
    bench.run("counter.inc() - no contention", [&]() {
      counter.inc(1.0);
      ankerl::nanobench::doNotOptimizeAway(counter);
    });

    bench.run("gauge.inc() - no contention", [&]() {
      gauge.inc(1.0);
      ankerl::nanobench::doNotOptimizeAway(gauge);
    });

    bench.run("gauge.set() - no contention", [&]() {
      gauge.set(42.0);
      ankerl::nanobench::doNotOptimizeAway(gauge);
    });

    bench.run("histogram.observe() - no contention", [&]() {
      histogram.observe(0.5);
      ankerl::nanobench::doNotOptimizeAway(histogram);
    });
  }

  // High contention with background threads
  {
    ContentionEnvironment env;
    // Start background threads creating contention
    env.start_background_contention(8);

    bench.run("counter.inc() - 8 background threads",
              [&]() { counter.inc(1.0); });

    bench.run("gauge.inc() - 8 background threads",
              [&]() { gauge.inc(1.0); });

    bench.run("gauge.set() - 8 background threads",
              [&]() { gauge.set(42.0); });

    bench.run("histogram.observe() - 8 background threads",
              [&]() { histogram.observe(1.5); });
  }

  // Concurrent render contention
  {
    ContentionEnvironment env;
    // Start background threads + render thread
    env.start_background_contention(4);
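    // A concurrent render is the worst case for the writers: while the render
    // thread repeatedly walks and serializes every registered metric, the
    // hot-path inc()/set()/observe() calls below measure how much that
    // read-side traffic costs them.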
    env.start_render_thread();

    bench.run("counter.inc() - with concurrent render",
              [&]() { counter.inc(1.0); });

    bench.run("gauge.inc() - with concurrent render",
              [&]() { gauge.inc(1.0); });

    bench.run("histogram.observe() - with concurrent render",
              [&]() { histogram.observe(2.0); });
  }

  // Render performance scaling
  {
    bench.unit("metric");
    bench.title("render performance");

    // Test render performance as the number of metrics increases
    for (int scale : {10, 100, 1000}) {
      metric::reset_metrics_for_testing();

      // NOTE: the element types below are assumed handle names for whatever
      // Family::create() returns; the originals were lost to angle-bracket
      // stripping.
      std::vector<metric::Counter> counters;
      std::vector<metric::Gauge> gauges;
      std::vector<metric::Histogram> histograms;

      // Clear previous metrics by creating new families
      // (Note: In real usage, metrics persist for the application lifetime)
      auto counter_family =
          metric::create_counter("scale_counter", "Scale counter");
      auto gauge_family = metric::create_gauge("scale_gauge", "Scale gauge");
      auto buckets = std::initializer_list<double>{0.1,  0.5,  1.0,  2.5,
                                                   5.0, 10.0, 25.0, 50.0};
      auto histogram_family = metric::create_histogram(
          "scale_histogram", "Scale histogram", buckets);

      std::atomic<double> counter_value{3.1415924654};

      bench.batch(scale * (/*counter*/ 1 + /*gauge*/ 1 + /*callback*/ 1 +
                           /*histogram*/ (buckets.size() * 2 + 2)));

      for (int i = 0; i < scale; ++i) {
        counters.emplace_back(
            counter_family.create({{"id", std::to_string(i)}}));
        gauges.emplace_back(gauge_family.create({{"id", std::to_string(i)}}));
        histograms.emplace_back(
            histogram_family.create({{"id", std::to_string(i)}}));

        // Set some values
        counters.back().inc(static_cast<double>(i));
        gauges.back().set(static_cast<double>(i * 2));
        histograms.back().observe(static_cast<double>(i) * 0.1);

        // Register callbacks
        counter_family.register_callback(
            {{"type", "callback"}, {"id", std::to_string(i)}},
            [&counter_value]() {
              return counter_value.load(std::memory_order_relaxed);
            });
      }

      ArenaAllocator arena;
      std::string bench_name =
          "render() - " + std::to_string(scale) + " metrics each type";
      bench.run(bench_name, [&]() {
        auto output = metric::render(arena);
        ankerl::nanobench::doNotOptimizeAway(output);
        arena.reset();
      });
    }
  }

  return 0;
}
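// Build sketch (file and include-path names here are assumptions, not part of
// the benchmark): nanobench is header-only and std::latch requires C++20, so
// something like
//
//   g++ -std=c++20 -O2 -pthread -I third_party/nanobench/include \
//       metrics_bench.cpp -o metrics_bench
//
// should suffice, provided arena_allocator.hpp and metric.hpp are on the
// include path.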