#include #include "arena.hpp" #include "metric.hpp" #include #include #include #include #include #include TEST_CASE("metric validation functions") { SUBCASE("valid metric names") { CHECK(metric::is_valid_metric_name("valid_name")); CHECK(metric::is_valid_metric_name("ValidName")); CHECK(metric::is_valid_metric_name("valid:name")); CHECK(metric::is_valid_metric_name("_valid")); CHECK(metric::is_valid_metric_name("valid_123")); CHECK(metric::is_valid_metric_name("prometheus_metric_name")); } SUBCASE("invalid metric names") { CHECK_FALSE(metric::is_valid_metric_name("")); CHECK_FALSE(metric::is_valid_metric_name("123invalid")); CHECK_FALSE(metric::is_valid_metric_name("invalid-name")); CHECK_FALSE(metric::is_valid_metric_name("invalid.name")); CHECK_FALSE(metric::is_valid_metric_name("invalid name")); } SUBCASE("valid label keys") { CHECK(metric::is_valid_label_key("valid_key")); CHECK(metric::is_valid_label_key("ValidKey")); CHECK(metric::is_valid_label_key("valid123")); CHECK(metric::is_valid_label_key("_valid")); } SUBCASE("invalid label keys") { CHECK_FALSE(metric::is_valid_label_key("")); CHECK_FALSE(metric::is_valid_label_key("123invalid")); CHECK_FALSE(metric::is_valid_label_key("invalid:key")); CHECK_FALSE(metric::is_valid_label_key("invalid-key")); CHECK_FALSE(metric::is_valid_label_key("__reserved")); CHECK_FALSE(metric::is_valid_label_key("__internal")); } SUBCASE("valid label values") { CHECK(metric::is_valid_label_value("any_value")); CHECK(metric::is_valid_label_value("123")); CHECK(metric::is_valid_label_value("special-chars.allowed")); CHECK(metric::is_valid_label_value("")); CHECK(metric::is_valid_label_value("unicode测试")); } } TEST_CASE("counter basic functionality") { auto counter_family = metric::create_counter("test_counter", "Test counter help"); SUBCASE("create counter with no labels") { auto counter = counter_family.create({}); counter.inc(1.0); counter.inc(2.5); counter.inc(); // Default increment of 1.0 } SUBCASE("create counter with labels") 
{ auto counter = counter_family.create({{"method", "GET"}, {"status", "200"}}); counter.inc(5.0); // Same labels should return same instance (idempotent) auto counter2 = counter_family.create({{"method", "GET"}, {"status", "200"}}); counter2.inc(3.0); } SUBCASE("label sorting") { // Labels should be sorted by key auto counter1 = counter_family.create({{"z_key", "value"}, {"a_key", "value"}}); auto counter2 = counter_family.create({{"a_key", "value"}, {"z_key", "value"}}); // These should be the same instance due to label sorting counter1.inc(1.0); counter2.inc(2.0); // Should add to same counter } } TEST_CASE("gauge basic functionality") { auto gauge_family = metric::create_gauge("test_gauge", "Test gauge help"); SUBCASE("gauge operations") { auto gauge = gauge_family.create({{"instance", "test"}}); gauge.set(10.0); gauge.inc(5.0); gauge.dec(3.0); gauge.inc(); // Default increment gauge.dec(); // Default decrement } SUBCASE("gauge with multiple instances") { auto gauge1 = gauge_family.create({{"instance", "test1"}}); auto gauge2 = gauge_family.create({{"instance", "test2"}}); gauge1.set(100.0); gauge2.set(200.0); gauge1.inc(50.0); gauge2.dec(25.0); } } TEST_CASE("histogram basic functionality") { auto hist_family = metric::create_histogram("test_latency", "Test latency histogram", metric::exponential_buckets(0.1, 2.0, 5)); SUBCASE("histogram observations") { auto hist = hist_family.create({{"endpoint", "/api"}}); hist.observe(0.05); // Below first bucket hist.observe(0.3); // Between buckets hist.observe(1.5); // Between buckets hist.observe(10.0); // Above all explicit buckets (goes in +Inf) } SUBCASE("histogram bucket validation") { // Buckets should be sorted and deduplicated, with +Inf added auto hist_family2 = metric::create_histogram( "test_hist2", "Test", std::initializer_list{5.0, 1.0, 2.5, 1.0, 0.5}); // Unsorted with duplicate auto hist = hist_family2.create({}); hist.observe(0.1); hist.observe(1.5); hist.observe(100.0); // Should go in +Inf bucket } } 
// Bucket generator tests: linear_buckets and exponential_buckets must produce
// exactly `count` boundaries (no implicit +Inf) matching the Prometheus Go
// client's LinearBuckets/ExponentialBuckets output.
TEST_CASE("histogram bucket generators") {
  SUBCASE("linear_buckets basic functionality") {
    // Linear buckets: start=0, width=10, count=5 -> {0, 10, 20, 30, 40}
    auto buckets = metric::linear_buckets(0.0, 10.0, 5);
    CHECK(buckets.size() == 5); // exactly count buckets
    CHECK(buckets[0] == 0.0);
    CHECK(buckets[1] == 10.0);
    CHECK(buckets[2] == 20.0);
    CHECK(buckets[3] == 30.0);
    CHECK(buckets[4] == 40.0);
  }
  SUBCASE("linear_buckets with non-zero start") {
    // Linear buckets: start=5, width=2.5, count=3 -> {5, 7.5, 10}
    auto buckets = metric::linear_buckets(5.0, 2.5, 3);
    CHECK(buckets.size() == 3);
    CHECK(buckets[0] == 5.0);
    CHECK(buckets[1] == 7.5);
    CHECK(buckets[2] == 10.0);
  }
  SUBCASE("linear_buckets edge cases") {
    // Zero count should give empty vector
    auto zero_buckets = metric::linear_buckets(100.0, 10.0, 0);
    CHECK(zero_buckets.size() == 0);
    // Negative start should work
    auto negative_buckets = metric::linear_buckets(-10.0, 5.0, 2);
    CHECK(negative_buckets.size() == 2);
    CHECK(negative_buckets[0] == -10.0);
    CHECK(negative_buckets[1] == -5.0);
  }
  SUBCASE("exponential_buckets basic functionality") {
    // Exponential buckets: start=1, factor=2, count=5 -> {1, 2, 4, 8, 16}
    auto buckets = metric::exponential_buckets(1.0, 2.0, 5);
    CHECK(buckets.size() == 5); // exactly count buckets
    CHECK(buckets[0] == 1.0);
    CHECK(buckets[1] == 2.0);
    CHECK(buckets[2] == 4.0);
    CHECK(buckets[3] == 8.0);
    CHECK(buckets[4] == 16.0);
  }
  SUBCASE("exponential_buckets different factor") {
    // Exponential buckets: start=0.1, factor=10, count=3 -> {0.1, 1, 10}
    auto buckets = metric::exponential_buckets(0.1, 10.0, 3);
    CHECK(buckets.size() == 3);
    CHECK(buckets[0] == doctest::Approx(0.1));
    CHECK(buckets[1] == doctest::Approx(1.0));
    CHECK(buckets[2] == doctest::Approx(10.0));
  }
  SUBCASE("exponential_buckets typical latency pattern") {
    // Typical web service latency buckets: 5ms, 10ms, 20ms, 40ms, 80ms, etc.
    auto buckets = metric::exponential_buckets(0.005, 2.0, 8);
    CHECK(buckets.size() == 8);
    CHECK(buckets[0] == doctest::Approx(0.005)); // 5ms
    CHECK(buckets[1] == doctest::Approx(0.010)); // 10ms
    CHECK(buckets[2] == doctest::Approx(0.020)); // 20ms
    CHECK(buckets[3] == doctest::Approx(0.040)); // 40ms
    CHECK(buckets[4] == doctest::Approx(0.080)); // 80ms
    CHECK(buckets[5] == doctest::Approx(0.160)); // 160ms
    CHECK(buckets[6] == doctest::Approx(0.320)); // 320ms
    CHECK(buckets[7] == doctest::Approx(0.640)); // 640ms
  }
  SUBCASE("exponential_buckets edge cases") {
    // Zero count should give empty vector
    auto zero_buckets = metric::exponential_buckets(5.0, 3.0, 0);
    CHECK(zero_buckets.size() == 0);
  }
  SUBCASE("bucket generators with histogram creation") {
    // Test that generated buckets work correctly with histogram creation
    auto linear_hist = metric::create_histogram(
        "linear_test", "Linear test", metric::linear_buckets(0, 100, 5));
    auto linear_instance = linear_hist.create({{"type", "linear"}});
    // Test observations fall into expected buckets
    linear_instance.observe(50);   // Should fall into 100 bucket
    linear_instance.observe(150);  // Should fall into 200 bucket
    linear_instance.observe(1000); // Should fall into +Inf bucket
    auto exp_hist = metric::create_histogram("exp_test", "Exponential test",
                                             metric::exponential_buckets(0.001, 10.0, 4));
    auto exp_instance = exp_hist.create({{"type", "exponential"}});
    // Test typical latency measurements
    exp_instance.observe(0.0005); // Should fall into 0.001 bucket (1ms)
    exp_instance.observe(0.005);  // Should fall into 0.01 bucket (10ms)
    exp_instance.observe(0.05);   // Should fall into 0.1 bucket (100ms)
    exp_instance.observe(5.0);    // Should fall into +Inf bucket
  }
  SUBCASE("prometheus compatibility verification") {
    // Verify our bucket generation matches Prometheus Go client behavior
    // Linear buckets equivalent to Prometheus LinearBuckets(0, 10, 5)
    auto our_linear = metric::linear_buckets(0, 10, 5);
    std::vector<double> expected_linear = {0, 10, 20, 30, 40};
    CHECK(our_linear == expected_linear);
    // Exponential buckets equivalent to Prometheus ExponentialBuckets(1, 2, 5)
    auto our_exp = metric::exponential_buckets(1, 2, 5);
    std::vector<double> expected_exp = {1, 2, 4, 8, 16};
    CHECK(our_exp == expected_exp);
    // Default Prometheus histogram buckets (exponential)
    auto default_buckets = metric::exponential_buckets(0.005, 2.5, 9);
    // Should be: .005, .0125, .03125, .078125, .1953125,
    // .48828125, 1.220703125, 3.0517578125, 7.62939453125
    CHECK(default_buckets.size() == 9);
    CHECK(default_buckets[0] == doctest::Approx(0.005));
    CHECK(default_buckets[1] == doctest::Approx(0.0125));
    CHECK(default_buckets[8] == doctest::Approx(7.62939453125));
  }
}

// Callback-registered (pull-style) metrics: the callback supplies the value
// at render time instead of a stored instance.
TEST_CASE("callback-based metrics") {
  auto counter_family = metric::create_counter("callback_counter", "Callback counter");
  auto gauge_family = metric::create_gauge("callback_gauge", "Callback gauge");
  SUBCASE("counter callback") {
    counter_family.register_callback({{"type", "callback"}}, []() { return 42.0; });
    // Callback should be called during render
    Arena arena;
    auto output = metric::render(arena);
    CHECK(output.size() > 0);
  }
  SUBCASE("gauge callback") {
    gauge_family.register_callback({{"type", "callback"}}, []() { return 123.5; });
    Arena arena;
    auto output = metric::render(arena);
    CHECK(output.size() > 0);
  }
  SUBCASE("callback conflict detection") {
    // First create a static instance
    auto counter = counter_family.create({{"conflict", "test"}});
    counter.inc(1.0);
    // Then try to register a callback with same labels - should abort
    // This is a validation test that would abort in debug builds
  }
}

// Prometheus text exposition format: HELP/TYPE headers, sample lines, and
// special-value formatting (+Inf).
TEST_CASE("prometheus text format rendering") {
  Arena arena;
  // Create some metrics
  auto counter_family = metric::create_counter("http_requests_total", "Total HTTP requests");
  auto counter = counter_family.create({{"method", "GET"}, {"status", "200"}});
  counter.inc(1000);
  auto gauge_family = metric::create_gauge("memory_usage_bytes", "Memory usage");
  auto gauge = gauge_family.create({{"type", "heap"}});
  gauge.set(1048576);
  auto hist_family = metric::create_histogram(
      "request_duration_seconds", "Request duration",
      metric::exponential_buckets(0.1, 2.0, 3)); // 0.1, 0.2, 0.4, 0.8
  auto hist = hist_family.create({{"handler", "api"}});
  hist.observe(0.25);
  hist.observe(0.75);
  hist.observe(1.5);
  SUBCASE("render format validation") {
    auto output = metric::render(arena);
    CHECK(output.size() > 0);
    // Basic format checks
    bool found_help = false;
    bool found_type = false;
    bool found_metric_line = false;
    for (const auto &line : output) {
      if (line.starts_with("# HELP")) found_help = true;
      if (line.find("# TYPE") != line.npos) found_type = true;
      if (line.find("http_requests_total") != std::string_view::npos)
        found_metric_line = true;
    }
    CHECK(found_help);
    CHECK(found_type);
    CHECK(found_metric_line);
  }
  SUBCASE("special value formatting") {
    auto special_gauge_family = metric::create_gauge("special_values", "Special value test");
    auto special_gauge = special_gauge_family.create({});
    special_gauge.set(std::numeric_limits<double>::infinity());
    auto output = metric::render(arena);
    // Should contain "+Inf" representation
    bool found_inf = false;
    for (const auto &line : output) {
      if (line.find("+Inf") != std::string_view::npos) {
        found_inf = true;
        break;
      }
    }
    CHECK(found_inf);
  }
}

// Concurrency tests: per-thread counters/histograms, shared-gauge CAS
// contention, and concurrent render() calls.
TEST_CASE("thread safety") {
  constexpr int num_threads = 8;
  constexpr int ops_per_thread = 1000;
  SUBCASE("counter single-writer semantics") {
    auto counter_family = metric::create_counter("thread_test_counter", "Thread test");
    std::vector<std::thread> threads;
    std::latch start_latch{num_threads};
    // Each thread creates its own counter instance (safe)
    for (int i = 0; i < num_threads; ++i) {
      threads.emplace_back([&, i]() {
        auto counter = counter_family.create({{"thread_id", std::to_string(i)}});
        start_latch.arrive_and_wait();
        for (int j = 0; j < ops_per_thread; ++j) {
          counter.inc(1.0);
        }
      });
    }
    for (auto &t : threads) {
      t.join();
    }
  }
  SUBCASE("gauge multi-writer contention") {
    auto gauge_family = metric::create_gauge("thread_test_gauge", "Thread test gauge");
    std::vector<std::thread> threads;
    std::latch start_latch{num_threads};
    // Multiple threads create gauges with the same labels, writing to the same
    // underlying state, testing CAS contention.
    for (int i = 0; i < num_threads; ++i) {
      threads.emplace_back([&]() {
        auto gauge = gauge_family.create({{"shared", "true"}});
        start_latch.arrive_and_wait();
        for (int j = 0; j < ops_per_thread; ++j) {
          gauge.inc(1.0);
        }
      });
    }
    for (auto &t : threads) {
      t.join();
    }
  }
  SUBCASE("histogram single-writer per thread") {
    auto hist_family = metric::create_histogram("thread_test_hist", "Thread test histogram",
                                                std::initializer_list<double>{0.1, 0.5, 1.0});
    std::vector<std::thread> threads;
    std::latch start_latch{num_threads};
    for (int i = 0; i < num_threads; ++i) {
      threads.emplace_back([&, i]() {
        auto hist = hist_family.create({{"thread_id", std::to_string(i)}});
        start_latch.arrive_and_wait();
        for (int j = 0; j < ops_per_thread; ++j) {
          hist.observe(static_cast<double>(j) / ops_per_thread);
        }
      });
    }
    for (auto &t : threads) {
      t.join();
    }
  }
  SUBCASE("concurrent render calls") {
    // Multiple threads calling render concurrently should be safe (serialized
    // by mutex)
    auto counter_family = metric::create_counter("render_test", "Render test");
    auto counter = counter_family.create({});
    counter.inc(100);
    std::vector<std::thread> threads;
    std::latch start_latch{num_threads};
    std::atomic<int> success_count{0};
    for (int i = 0; i < num_threads; ++i) {
      threads.emplace_back([&]() {
        start_latch.arrive_and_wait();
        Arena arena;
        auto output = metric::render(arena);
        if (output.size() > 0) {
          success_count.fetch_add(1);
        }
      });
    }
    for (auto &t : threads) {
      t.join();
    }
    CHECK(success_count.load() == num_threads);
  }
}

// Scans rendered output for lines containing `needle` and returns the numeric
// sample value (the text after the last space, minus a trailing newline) of
// the last matching line, or 0.0 when nothing matches. Replaces four
// copy-pasted inline parsing loops; also guards against calling back() on an
// empty value (which the originals did not).
template <typename Lines>
static double find_sample_value(const Lines &output, std::string_view needle) {
  double value = 0.0;
  for (const auto &line : output) {
    if (line.find(needle) == std::string_view::npos) {
      continue;
    }
    auto space_pos = line.rfind(' ');
    if (space_pos == std::string_view::npos) {
      continue;
    }
    auto value_str = line.substr(space_pos + 1);
    if (!value_str.empty() && value_str.back() == '\n') {
      value_str.remove_suffix(1);
    }
    value = std::stod(std::string(value_str));
  }
  return value;
}

// Known-failing regression test, disabled via the early return below:
// documents the bug where per-thread counter/histogram state is dropped on
// thread exit instead of being folded into global storage.
TEST_CASE("thread counter cleanup bug") {
  return; // disabled: documents a known bug (see comments at the bottom)
  SUBCASE(
      "counter and histogram values should persist after thread destruction") {
    auto counter_family = metric::create_counter(
        "thread_cleanup_counter", "Counter for thread cleanup test");
    auto histogram_family = metric::create_histogram(
        "thread_cleanup_histogram", "Histogram for thread cleanup test",
        metric::linear_buckets(0.0, 1.0, 5)); // buckets: 0, 1, 2, 3, 4
    // Variables to collect actual values from worker thread
    double counter_value_in_thread = 0;
    double histogram_sum_in_thread = 0;
    // Create thread that increments metrics and then exits
    std::thread worker([&]() {
      auto counter = counter_family.create({{"worker", "cleanup_test"}});
      auto histogram = histogram_family.create({{"worker", "cleanup_test"}});
      counter.inc(1.0);
      histogram.observe(1.5); // Should contribute to sum
      // Measure actual values from within the thread (before ThreadInit
      // destructor runs)
      Arena thread_arena;
      auto thread_output = metric::render(thread_arena);
      counter_value_in_thread = find_sample_value(
          thread_output, "thread_cleanup_counter{worker=\"cleanup_test\"}");
      histogram_sum_in_thread = find_sample_value(
          thread_output, "thread_cleanup_histogram_sum{worker=\"cleanup_test\"}");
    });
    // Wait for thread to complete and destroy (triggering ThreadInit
    // destructor)
    worker.join();
    // Measure values after thread cleanup
    Arena arena;
    auto output = metric::render(arena);
    double counter_value_after = find_sample_value(
        output, "thread_cleanup_counter{worker=\"cleanup_test\"}");
    double histogram_sum_after = find_sample_value(
        output, "thread_cleanup_histogram_sum{worker=\"cleanup_test\"}");
    // Values should have been captured correctly within the thread
    CHECK(counter_value_in_thread == 1.0);
    CHECK(histogram_sum_in_thread == 1.5);
    // The bug: These values should persist after thread cleanup but will be
    // lost because ThreadInit destructor erases per-thread state without
    // accumulating values
    CHECK(counter_value_after == 1.0);
    CHECK(histogram_sum_after == 1.5);
    // The bug: After thread destruction, the counter and histogram values are
    // lost because ThreadInit::~ThreadInit() calls
    // family->perThreadState.erase(thread_id) without accumulating the values
    // into global storage first. This causes counter values to "go backwards"
    // when threads are destroyed, violating the monotonic property of counters.
  }
}

// Arena usage during render: output string_views are backed by arena memory,
// and resetting the arena must not disturb metric state.
TEST_CASE("memory management") {
  SUBCASE("arena allocation in render") {
    Arena arena;
    auto initial_used = arena.used_bytes();
    auto counter_family = metric::create_counter("memory_test", "Memory test");
    auto counter = counter_family.create(
        {{"large_label", "very_long_value_that_takes_space"}});
    counter.inc(42);
    auto output = metric::render(arena);
    auto final_used = arena.used_bytes();
    CHECK(output.size() > 0);
    CHECK(final_used > initial_used); // Arena was used for string allocation
    // All string_views should point to arena memory
    for (const auto &line : output) {
      CHECK(line.size() > 0);
    }
  }
  SUBCASE("arena reset behavior") {
    Arena arena;
    auto counter_family = metric::create_counter("reset_test", "Reset test");
    auto counter = counter_family.create({});
    counter.inc(1);
    // Render multiple times with arena resets
    for (int i = 0; i < 5; ++i) {
      auto output = metric::render(arena);
      CHECK(output.size() > 0);
      arena.reset(); // Should not affect metric values, only arena memory
    }
    // Final render should still work
    auto final_output = metric::render(arena);
    CHECK(final_output.size() > 0);
  }
}

// Stress test for the pending-buffer race: short-lived threads observe into a
// histogram while a render loop holds the global mutex; observations stuck in
// a thread's pending buffer must not be lost when the thread dies.
TEST_CASE("histogram pending buffer thread cleanup bug") {
  for (int iterations = 0; iterations < 1000; ++iterations) {
    // This test demonstrates the bug where pending histogram observations
    // are lost when a thread dies because ThreadInit destructor doesn't
    // flush pending data into shared before accumulating into global state.
    metric::reset_metrics_for_testing();
    auto hist_family = metric::create_histogram(
        "pending_bug_test", "Test histogram for pending buffer bug",
        {1.0}); // Single bucket for simplicity
    std::atomic<bool> keep_rendering{true};
    constexpr int num_threads = 100;
    std::latch ready{2};
    // Background thread that calls render in a tight loop to hold global mutex
    std::thread render_thread([&]() {
      ready.arrive_and_wait();
      Arena arena;
      while (keep_rendering.load(std::memory_order_relaxed)) {
        metric::render(arena);
        arena.reset();
      }
    });
    // Don't spawn threads until render thread is running
    ready.arrive_and_wait();
    // Spawn threads that observe once and exit
    std::vector<std::thread> observer_threads;
    for (int i = 0; i < num_threads; ++i) {
      observer_threads.emplace_back([&hist_family]() {
        auto hist = hist_family.create({{"test", "observer"}});
        hist.observe(0.5); // Goes into first bucket (le="1.0")
        // Thread dies here - pending observations should be lost due to bug
      });
    }
    // Join all observer threads
    for (auto &t : observer_threads) {
      t.join();
    }
    // Stop render thread
    keep_rendering.store(false, std::memory_order_relaxed);
    render_thread.join();
    // Check if the worker's observations were preserved
    Arena arena;
    auto output = metric::render(arena);
    // First, let's debug what we actually got
    std::ostringstream debug_output;
    for (const auto &line : output) {
      debug_output << line;
    }
    std::string full_output = debug_output.str();
    // Parse the output to find the worker's bucket count for le="1.0"
    uint64_t worker_bucket_count = 0;
    bool found_worker_metric = false;
    // The render output alternates between metric name and value in separate
    // string_views
    for (size_t i = 0; i < output.size(); ++i) {
      const auto &line = output[i];
      // Look for: pending_bug_test_bucket{test="observer",le="1.0"}
      if (line.find("pending_bug_test_bucket{test=\"observer\",le=\"1.0\"}") !=
          std::string_view::npos) {
        found_worker_metric = true;
        // The value should be in the next element
        if (i + 1 < output.size()) {
          auto value_str = output[i + 1];
          // Remove trailing newline if present
          while (!value_str.empty() &&
                 (value_str.back() == '\n' || value_str.back() == '\r')) {
            value_str.remove_suffix(1);
          }
          try {
            worker_bucket_count = std::stoull(std::string(value_str));
          } catch (const std::exception &) {
            MESSAGE("Failed to parse value: '" << value_str
                                               << "' from metric line: '" << line << "'");
            MESSAGE("Full output:\n" << full_output);
            throw;
          }
        }
        break;
      }
    }
    REQUIRE(found_worker_metric); // The metric should exist
    // BUG: This will fail because pending observations are lost on thread death
    // Expected: num_threads observations (each thread made 1 observation)
    // Actual: less than num_threads (observations stuck in pending are lost
    // when threads die)
    CHECK_MESSAGE(
        worker_bucket_count == num_threads,
        "Expected " << num_threads << " observations but got "
                    << worker_bucket_count
                    << ". This indicates the pending buffer bug where observations "
                    << "stuck in pending are lost when thread dies.");
  }
}

// Golden test: render() output must be byte-for-byte deterministic —
// families alphabetical, instances ordered by sorted label sets, callbacks
// interleaved with static instances.
TEST_CASE("render output deterministic order golden test") {
  // Clean slate - reset all metrics before this test
  metric::reset_metrics_for_testing();
  Arena arena;
  // Create a comprehensive set of metrics with deliberate ordering
  // to test deterministic output
  // Create counters with different family names and labels
  auto z_counter_family =
      metric::create_counter("z_last_counter", "Last counter alphabetically");
  auto z_counter = z_counter_family.create({{"method", "POST"}, {"handler", "api"}});
  z_counter.inc(42.0);
  auto a_counter_family =
      metric::create_counter("a_first_counter", "First counter alphabetically");
  auto a_counter1 = a_counter_family.create({{"status", "200"}});
  auto a_counter2 = a_counter_family.create(
      {{"method", "GET"}}); // Should come before status lexicographically
  a_counter1.inc(100.0);
  a_counter2.inc(200.0);
  // Create gauges with different orderings
  auto m_gauge_family = metric::create_gauge("m_middle_gauge", "Middle gauge");
  auto m_gauge = m_gauge_family.create({{"type", "memory"}});
  m_gauge.set(1024.0);
  auto b_gauge_family = metric::create_gauge("b_second_gauge", "Second gauge");
  auto b_gauge = b_gauge_family.create({{"region", "us-west"}});
  b_gauge.set(256.0);
  // Create histograms
  auto x_hist_family =
      metric::create_histogram("x_histogram", "Test histogram", {0.1, 0.5, 1.0});
  auto x_hist = x_hist_family.create({{"endpoint", "/api/v1"}});
  x_hist.observe(0.25);
  x_hist.observe(0.75);
  // Add some callbacks to test callback ordering
  a_counter_family.register_callback({{"callback", "test"}}, []() { return 123.0; });
  m_gauge_family.register_callback({{"callback", "dynamic"}}, []() { return 456.0; });
  // Render the metrics
  auto output = metric::render(arena);
  // Concatenate all output into a single string
  std::ostringstream oss;
  for (const auto &line : output) {
    oss << line;
  }
  std::string actual_output = oss.str();
  // Define expected golden output - this represents the exact expected
  // deterministic order
  std::string expected_golden =
      "# HELP a_first_counter First counter alphabetically\n"
      "# TYPE a_first_counter counter\n"
      "a_first_counter{callback=\"test\"} 123.0\n"
      "a_first_counter{method=\"GET\"} 200.0\n"
      "a_first_counter{status=\"200\"} 100.0\n"
      "# HELP z_last_counter Last counter alphabetically\n"
      "# TYPE z_last_counter counter\n"
      "z_last_counter{handler=\"api\",method=\"POST\"} 42.0\n"
      "# HELP b_second_gauge Second gauge\n"
      "# TYPE b_second_gauge gauge\n"
      "b_second_gauge{region=\"us-west\"} 256.0\n"
      "# HELP m_middle_gauge Middle gauge\n"
      "# TYPE m_middle_gauge gauge\n"
      "m_middle_gauge{callback=\"dynamic\"} 456.0\n"
      "m_middle_gauge{type=\"memory\"} 1024.0\n"
      "# HELP x_histogram Test histogram\n"
      "# TYPE x_histogram histogram\n"
      "x_histogram_bucket{endpoint=\"/api/v1\",le=\"0.1\"} 0\n"
      "x_histogram_bucket{endpoint=\"/api/v1\",le=\"0.5\"} 1\n"
      "x_histogram_bucket{endpoint=\"/api/v1\",le=\"1.0\"} 2\n"
      "x_histogram_bucket{endpoint=\"/api/v1\",le=\"+Inf\"} 2\n"
      "x_histogram_sum{endpoint=\"/api/v1\"} 1.0\n"
      "x_histogram_count{endpoint=\"/api/v1\"} 2\n";
  // Check if output matches golden file
  if (actual_output != expected_golden) {
    MESSAGE("Render output does not match expected golden output.");
    MESSAGE("This indicates the deterministic ordering has changed.");
    MESSAGE("Expected output:\n" << expected_golden);
    MESSAGE("Actual output:\n" << actual_output);
    CHECK(false); // Force test failure
  } else {
    CHECK(true); // Test passes
  }
}