#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include "arena_allocator.hpp"
#include "metric.hpp"

#include <atomic>
#include <chrono>
#include <cmath>
#include <latch>
#include <limits>
#include <string>
#include <string_view>
#include <thread>
#include <vector>

TEST_CASE("metric validation functions") {
|
|
SUBCASE("valid metric names") {
|
|
CHECK(metric::is_valid_metric_name("valid_name"));
|
|
CHECK(metric::is_valid_metric_name("ValidName"));
|
|
CHECK(metric::is_valid_metric_name("valid:name"));
|
|
CHECK(metric::is_valid_metric_name("_valid"));
|
|
CHECK(metric::is_valid_metric_name("valid_123"));
|
|
CHECK(metric::is_valid_metric_name("prometheus_metric_name"));
|
|
}
|
|
|
|
SUBCASE("invalid metric names") {
|
|
CHECK_FALSE(metric::is_valid_metric_name(""));
|
|
CHECK_FALSE(metric::is_valid_metric_name("123invalid"));
|
|
CHECK_FALSE(metric::is_valid_metric_name("invalid-name"));
|
|
CHECK_FALSE(metric::is_valid_metric_name("invalid.name"));
|
|
CHECK_FALSE(metric::is_valid_metric_name("invalid name"));
|
|
}
|
|
|
|
SUBCASE("valid label keys") {
|
|
CHECK(metric::is_valid_label_key("valid_key"));
|
|
CHECK(metric::is_valid_label_key("ValidKey"));
|
|
CHECK(metric::is_valid_label_key("valid123"));
|
|
CHECK(metric::is_valid_label_key("_valid"));
|
|
}
|
|
|
|
SUBCASE("invalid label keys") {
|
|
CHECK_FALSE(metric::is_valid_label_key(""));
|
|
CHECK_FALSE(metric::is_valid_label_key("123invalid"));
|
|
CHECK_FALSE(metric::is_valid_label_key("invalid:key"));
|
|
CHECK_FALSE(metric::is_valid_label_key("invalid-key"));
|
|
CHECK_FALSE(metric::is_valid_label_key("__reserved"));
|
|
CHECK_FALSE(metric::is_valid_label_key("__internal"));
|
|
}
|
|
|
|
SUBCASE("valid label values") {
|
|
CHECK(metric::is_valid_label_value("any_value"));
|
|
CHECK(metric::is_valid_label_value("123"));
|
|
CHECK(metric::is_valid_label_value("special-chars.allowed"));
|
|
CHECK(metric::is_valid_label_value(""));
|
|
CHECK(metric::is_valid_label_value("unicode测试"));
|
|
}
|
|
}
|
|
|
|
TEST_CASE("counter basic functionality") {
|
|
auto counter_family =
|
|
metric::create_counter("test_counter", "Test counter help");
|
|
|
|
SUBCASE("create counter with no labels") {
|
|
auto counter = counter_family.create({});
|
|
counter.inc(1.0);
|
|
counter.inc(2.5);
|
|
counter.inc(); // Default increment of 1.0
|
|
}
|
|
|
|
SUBCASE("create counter with labels") {
|
|
auto counter =
|
|
counter_family.create({{"method", "GET"}, {"status", "200"}});
|
|
counter.inc(5.0);
|
|
|
|
// Same labels should return same instance (idempotent)
|
|
auto counter2 =
|
|
counter_family.create({{"method", "GET"}, {"status", "200"}});
|
|
counter2.inc(3.0);
|
|
}
|
|
|
|
SUBCASE("label sorting") {
|
|
// Labels should be sorted by key
|
|
auto counter1 =
|
|
counter_family.create({{"z_key", "value"}, {"a_key", "value"}});
|
|
auto counter2 =
|
|
counter_family.create({{"a_key", "value"}, {"z_key", "value"}});
|
|
|
|
// These should be the same instance due to label sorting
|
|
counter1.inc(1.0);
|
|
counter2.inc(2.0); // Should add to same counter
|
|
}
|
|
}
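// A minimal render-based check of the label-sorting claim above. Assumptions:
// render() emits series in the `name{key="value",key="value"} <value>` form
// used by the search strings later in this file, with label pairs joined by a
// bare comma in sorted key order. If the separator differs, the search
// strings below would need to change.
TEST_CASE("label order canonicalization in render (sketch)") {
  auto family =
      metric::create_counter("label_sort_check", "Label sort sketch");
  auto c1 = family.create({{"z_key", "v"}, {"a_key", "v"}});
  auto c2 = family.create({{"a_key", "v"}, {"z_key", "v"}});
  c1.inc(1.0);
  c2.inc(2.0);

  ArenaAllocator arena;
  auto output = metric::render(arena);

  int sorted_series = 0;
  int reversed_series = 0;
  for (const auto &line : output) {
    if (line.find("label_sort_check{a_key=\"v\",z_key=\"v\"}") !=
        std::string_view::npos)
      ++sorted_series;
    if (line.find("label_sort_check{z_key=\"v\",a_key=\"v\"}") !=
        std::string_view::npos)
      ++reversed_series;
  }
  CHECK(sorted_series == 1); // Both creates alias one canonical series
  CHECK(reversed_series == 0);
}
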
TEST_CASE("gauge basic functionality") {
  auto gauge_family = metric::create_gauge("test_gauge", "Test gauge help");

  SUBCASE("gauge operations") {
    auto gauge = gauge_family.create({{"instance", "test"}});

    gauge.set(10.0);
    gauge.inc(5.0);
    gauge.dec(3.0);
    gauge.inc(); // Default increment
    gauge.dec(); // Default decrement
  }

  SUBCASE("gauge with multiple instances") {
    auto gauge1 = gauge_family.create({{"instance", "test1"}});
    auto gauge2 = gauge_family.create({{"instance", "test2"}});

    gauge1.set(100.0);
    gauge2.set(200.0);

    gauge1.inc(50.0);
    gauge2.dec(25.0);
  }
}

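// A minimal end-to-end check of gauge arithmetic through the rendered text,
// assuming an unlabeled series renders as `gauge_arith_check <value>` (name,
// space, value), which matches the rfind(' ') parsing used in the
// thread-cleanup test below.
TEST_CASE("gauge arithmetic visible in render (sketch)") {
  auto family =
      metric::create_gauge("gauge_arith_check", "Gauge arithmetic sketch");
  auto gauge = family.create({});
  gauge.set(10.0);
  gauge.inc(5.0);
  gauge.dec(3.0); // Expected value: 10 + 5 - 3 = 12

  ArenaAllocator arena;
  auto output = metric::render(arena);

  bool verified = false;
  for (const auto &line : output) {
    if (!line.starts_with("gauge_arith_check "))
      continue; // Skips "# HELP" / "# TYPE" lines, which start with '#'
    auto value_str = line.substr(line.rfind(' ') + 1);
    if (!value_str.empty() && value_str.back() == '\n') {
      value_str.remove_suffix(1);
    }
    verified = std::stod(std::string(value_str)) == doctest::Approx(12.0);
  }
  CHECK(verified);
}
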
TEST_CASE("histogram basic functionality") {
  auto hist_family =
      metric::create_histogram("test_latency", "Test latency histogram",
                               metric::exponential_buckets(0.1, 2.0, 5));

  SUBCASE("histogram observations") {
    auto hist = hist_family.create({{"endpoint", "/api"}});

    hist.observe(0.05); // Below first bucket
    hist.observe(0.3);  // Between buckets
    hist.observe(1.5);  // Between buckets
    hist.observe(10.0); // Above all explicit buckets (goes in +Inf)
  }

  SUBCASE("histogram bucket validation") {
    // Buckets should be sorted and deduplicated, with +Inf added
    auto hist_family2 = metric::create_histogram(
        "test_hist2", "Test",
        std::initializer_list<double>{5.0, 1.0, 2.5, 1.0,
                                      0.5}); // Unsorted with duplicate

    auto hist = hist_family2.create({});
    hist.observe(0.1);
    hist.observe(1.5);
    hist.observe(100.0); // Should go in +Inf bucket
  }
}

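// A minimal check of the +Inf behavior described above, assuming
// Prometheus-style histogram output with `<name>_bucket` series and an
// `le="+Inf"` label; only the presence of the +Inf bucket line is asserted,
// not its exact count.
TEST_CASE("histogram renders a +Inf bucket (sketch)") {
  auto family = metric::create_histogram(
      "bucket_render_check", "Bucket render sketch",
      std::initializer_list<double>{0.5, 1.0});
  auto hist = family.create({});
  hist.observe(0.25);
  hist.observe(2.0); // Above all explicit buckets; lands only in +Inf

  ArenaAllocator arena;
  auto output = metric::render(arena);

  bool found_inf_bucket = false;
  for (const auto &line : output) {
    if (line.find("bucket_render_check_bucket") != std::string_view::npos &&
        line.find("+Inf") != std::string_view::npos) {
      found_inf_bucket = true;
    }
  }
  CHECK(found_inf_bucket);
}
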
TEST_CASE("histogram bucket generators") {
  SUBCASE("linear_buckets basic functionality") {
    // Linear buckets: start=0, width=10, count=5 -> {0, 10, 20, 30, 40}
    auto buckets = metric::linear_buckets(0.0, 10.0, 5);

    CHECK(buckets.size() == 5); // exactly count buckets
    CHECK(buckets[0] == 0.0);
    CHECK(buckets[1] == 10.0);
    CHECK(buckets[2] == 20.0);
    CHECK(buckets[3] == 30.0);
    CHECK(buckets[4] == 40.0);
  }

  SUBCASE("linear_buckets with non-zero start") {
    // Linear buckets: start=5, width=2.5, count=3 -> {5, 7.5, 10}
    auto buckets = metric::linear_buckets(5.0, 2.5, 3);

    CHECK(buckets.size() == 3);
    CHECK(buckets[0] == 5.0);
    CHECK(buckets[1] == 7.5);
    CHECK(buckets[2] == 10.0);
  }

  SUBCASE("linear_buckets edge cases") {
    // Zero count should give an empty vector
    auto zero_buckets = metric::linear_buckets(100.0, 10.0, 0);
    CHECK(zero_buckets.size() == 0);

    // Negative start should work
    auto negative_buckets = metric::linear_buckets(-10.0, 5.0, 2);
    CHECK(negative_buckets.size() == 2);
    CHECK(negative_buckets[0] == -10.0);
    CHECK(negative_buckets[1] == -5.0);
  }

  SUBCASE("exponential_buckets basic functionality") {
    // Exponential buckets: start=1, factor=2, count=5 -> {1, 2, 4, 8, 16}
    auto buckets = metric::exponential_buckets(1.0, 2.0, 5);

    CHECK(buckets.size() == 5); // exactly count buckets
    CHECK(buckets[0] == 1.0);
    CHECK(buckets[1] == 2.0);
    CHECK(buckets[2] == 4.0);
    CHECK(buckets[3] == 8.0);
    CHECK(buckets[4] == 16.0);
  }

  SUBCASE("exponential_buckets different factor") {
    // Exponential buckets: start=0.1, factor=10, count=3 -> {0.1, 1, 10}
    auto buckets = metric::exponential_buckets(0.1, 10.0, 3);

    CHECK(buckets.size() == 3);
    CHECK(buckets[0] == doctest::Approx(0.1));
    CHECK(buckets[1] == doctest::Approx(1.0));
    CHECK(buckets[2] == doctest::Approx(10.0));
  }

  SUBCASE("exponential_buckets typical latency pattern") {
    // Typical web service latency buckets: 5ms, 10ms, 20ms, 40ms, 80ms, etc.
    auto buckets = metric::exponential_buckets(0.005, 2.0, 8);

    CHECK(buckets.size() == 8);
    CHECK(buckets[0] == doctest::Approx(0.005)); // 5ms
    CHECK(buckets[1] == doctest::Approx(0.010)); // 10ms
    CHECK(buckets[2] == doctest::Approx(0.020)); // 20ms
    CHECK(buckets[3] == doctest::Approx(0.040)); // 40ms
    CHECK(buckets[4] == doctest::Approx(0.080)); // 80ms
    CHECK(buckets[5] == doctest::Approx(0.160)); // 160ms
    CHECK(buckets[6] == doctest::Approx(0.320)); // 320ms
    CHECK(buckets[7] == doctest::Approx(0.640)); // 640ms
  }

  SUBCASE("exponential_buckets edge cases") {
    // Zero count should give an empty vector
    auto zero_buckets = metric::exponential_buckets(5.0, 3.0, 0);
    CHECK(zero_buckets.size() == 0);
  }

  SUBCASE("bucket generators with histogram creation") {
    // Generated buckets should work directly with histogram creation.
    // linear_buckets(0, 100, 5) -> {0, 100, 200, 300, 400}
    auto linear_hist = metric::create_histogram(
        "linear_test", "Linear test", metric::linear_buckets(0, 100, 5));
    auto linear_instance = linear_hist.create({{"type", "linear"}});

    // Observations should fall into the expected buckets
    linear_instance.observe(50);   // Should fall into the 100 bucket
    linear_instance.observe(150);  // Should fall into the 200 bucket
    linear_instance.observe(1000); // Should fall into the +Inf bucket

    // exponential_buckets(0.001, 10, 4) -> {0.001, 0.01, 0.1, 1}
    auto exp_hist =
        metric::create_histogram("exp_test", "Exponential test",
                                 metric::exponential_buckets(0.001, 10.0, 4));
    auto exp_instance = exp_hist.create({{"type", "exponential"}});

    // Typical latency measurements
    exp_instance.observe(0.0005); // Should fall into the 0.001 bucket (1ms)
    exp_instance.observe(0.005);  // Should fall into the 0.01 bucket (10ms)
    exp_instance.observe(0.05);   // Should fall into the 0.1 bucket (100ms)
    exp_instance.observe(5.0);    // Should fall into the +Inf bucket
  }

  SUBCASE("prometheus compatibility verification") {
    // Verify our bucket generation matches the Prometheus Go client behavior

    // Equivalent to Prometheus LinearBuckets(0, 10, 5)
    auto our_linear = metric::linear_buckets(0, 10, 5);
    std::vector<double> expected_linear = {0, 10, 20, 30, 40};
    CHECK(our_linear == expected_linear);

    // Equivalent to Prometheus ExponentialBuckets(1, 2, 5)
    auto our_exp = metric::exponential_buckets(1, 2, 5);
    std::vector<double> expected_exp = {1, 2, 4, 8, 16};
    CHECK(our_exp == expected_exp);

    // An exponential ladder spanning roughly the same range as Prometheus's
    // default histogram buckets (the defaults themselves are hand-picked,
    // not a pure geometric series)
    auto default_buckets = metric::exponential_buckets(0.005, 2.5, 9);
    // Should be: .005, .0125, .03125, .078125, .1953125,
    // .48828125, 1.220703125, 3.0517578125, 7.62939453125
    CHECK(default_buckets.size() == 9);
    CHECK(default_buckets[0] == doctest::Approx(0.005));
    CHECK(default_buckets[1] == doctest::Approx(0.0125));
    CHECK(default_buckets[8] == doctest::Approx(7.62939453125));
  }
}

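// The spot checks above can be restated as closed forms:
// linear_buckets(s, w, n)[i] == s + i*w and
// exponential_buckets(s, f, n)[i] == s * f^i. This loop checks every index
// for one arbitrary parameter set, relying only on the generator APIs
// already exercised above.
TEST_CASE("bucket generator closed forms (sketch)") {
  const double start = 0.25;
  const double width = 1.5;
  const double factor = 3.0;
  const int count = 7;

  auto lin = metric::linear_buckets(start, width, count);
  auto geo = metric::exponential_buckets(start, factor, count);
  REQUIRE(static_cast<int>(lin.size()) == count);
  REQUIRE(static_cast<int>(geo.size()) == count);

  for (int i = 0; i < count; ++i) {
    const double di = static_cast<double>(i);
    CHECK(lin[i] == doctest::Approx(start + di * width));
    CHECK(geo[i] == doctest::Approx(start * std::pow(factor, di)));
  }
}
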
TEST_CASE("callback-based metrics") {
  auto counter_family =
      metric::create_counter("callback_counter", "Callback counter");
  auto gauge_family = metric::create_gauge("callback_gauge", "Callback gauge");

  SUBCASE("counter callback") {
    std::atomic<double> counter_value{42.0};

    counter_family.register_callback(
        {{"type", "callback"}},
        [&counter_value]() { return counter_value.load(); });

    // The callback should be invoked during render
    ArenaAllocator arena;
    auto output = metric::render(arena);
    CHECK(output.size() > 0);
  }

  SUBCASE("gauge callback") {
    std::atomic<double> gauge_value{123.5};

    gauge_family.register_callback({{"type", "callback"}}, [&gauge_value]() {
      return gauge_value.load();
    });

    ArenaAllocator arena;
    auto output = metric::render(arena);
    CHECK(output.size() > 0);
  }

  SUBCASE("callback conflict detection") {
    // First create a static instance
    auto counter = counter_family.create({{"conflict", "test"}});
    counter.inc(1.0);

    // Registering a callback with the same labels would abort in debug
    // builds, so the conflicting call is only documented here, not executed
  }
}

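// A minimal check that a registered callback actually surfaces as a series in
// the rendered output. The series-format assumption matches the search
// strings in the thread-cleanup test below. The source atomic has static
// storage so the globally registered callback never dangles once this test
// case returns.
TEST_CASE("callback value appears in render (sketch)") {
  static std::atomic<double> value{7.0};
  auto family =
      metric::create_gauge("callback_render_check", "Callback render sketch");
  family.register_callback({{"src", "cb"}}, []() { return value.load(); });

  ArenaAllocator arena;
  auto output = metric::render(arena);

  bool found = false;
  for (const auto &line : output) {
    if (line.find("callback_render_check{src=\"cb\"}") !=
        std::string_view::npos) {
      found = true;
    }
  }
  CHECK(found);
}
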
TEST_CASE("prometheus text format rendering") {
  ArenaAllocator arena;

  // Create some metrics
  auto counter_family =
      metric::create_counter("http_requests_total", "Total HTTP requests");
  auto counter = counter_family.create({{"method", "GET"}, {"status", "200"}});
  counter.inc(1000);

  auto gauge_family =
      metric::create_gauge("memory_usage_bytes", "Memory usage");
  auto gauge = gauge_family.create({{"type", "heap"}});
  gauge.set(1048576);

  auto hist_family = metric::create_histogram(
      "request_duration_seconds", "Request duration",
      metric::exponential_buckets(0.1, 2.0, 3)); // 0.1, 0.2, 0.4
  auto hist = hist_family.create({{"handler", "api"}});
  hist.observe(0.25);
  hist.observe(0.75);
  hist.observe(1.5);

  SUBCASE("render format validation") {
    auto output = metric::render(arena);
    CHECK(output.size() > 0);

    // Basic format checks
    bool found_help = false;
    bool found_type = false;
    bool found_metric_line = false;

    for (const auto &line : output) {
      if (line.starts_with("# HELP"))
        found_help = true;
      if (line.starts_with("# TYPE"))
        found_type = true;
      if (line.find("http_requests_total") != std::string_view::npos)
        found_metric_line = true;
    }

    CHECK(found_help);
    CHECK(found_type);
    CHECK(found_metric_line);
  }

  SUBCASE("special value formatting") {
    auto special_gauge_family =
        metric::create_gauge("special_values", "Special value test");
    auto special_gauge = special_gauge_family.create({});

    special_gauge.set(std::numeric_limits<double>::infinity());
    auto output = metric::render(arena);

    // Should contain a "+Inf" representation
    bool found_inf = false;
    for (const auto &line : output) {
      if (line.find("+Inf") != std::string_view::npos) {
        found_inf = true;
        break;
      }
    }
    CHECK(found_inf);
  }
}

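// A minimal check of the TYPE line contents, going one step beyond the prefix
// check above. Assumes the Prometheus exposition format
// `# TYPE <name> <type>`, where <type> is "counter" for counter families.
TEST_CASE("TYPE line names the metric type (sketch)") {
  auto family = metric::create_counter("type_line_check", "Type line sketch");
  auto counter = family.create({});
  counter.inc(1.0);

  ArenaAllocator arena;
  auto output = metric::render(arena);

  bool found = false;
  for (const auto &line : output) {
    if (line.starts_with("# TYPE") &&
        line.find("type_line_check") != std::string_view::npos &&
        line.find("counter") != std::string_view::npos) {
      found = true;
    }
  }
  CHECK(found);
}
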
TEST_CASE("thread safety") {
  constexpr int num_threads = 8;
  constexpr int ops_per_thread = 1000;

  SUBCASE("counter single-writer semantics") {
    auto counter_family =
        metric::create_counter("thread_test_counter", "Thread test");

    std::vector<std::thread> threads;
    std::latch start_latch{num_threads};

    // Each thread creates its own counter instance (safe)
    for (int i = 0; i < num_threads; ++i) {
      threads.emplace_back([&, i]() {
        auto counter =
            counter_family.create({{"thread_id", std::to_string(i)}});

        start_latch.arrive_and_wait();

        for (int j = 0; j < ops_per_thread; ++j) {
          counter.inc(1.0);
        }
      });
    }

    for (auto &t : threads) {
      t.join();
    }
  }

  SUBCASE("gauge multi-writer contention") {
    auto gauge_family =
        metric::create_gauge("thread_test_gauge", "Thread test gauge");

    std::vector<std::thread> threads;
    std::latch start_latch{num_threads};

    // Multiple threads create gauges with the same labels, writing to the
    // same underlying state, which exercises CAS contention.
    for (int i = 0; i < num_threads; ++i) {
      threads.emplace_back([&]() {
        auto gauge = gauge_family.create({{"shared", "true"}});
        start_latch.arrive_and_wait();

        for (int j = 0; j < ops_per_thread; ++j) {
          gauge.inc(1.0);
        }
      });
    }

    for (auto &t : threads) {
      t.join();
    }
  }

  SUBCASE("histogram single-writer per thread") {
    auto hist_family =
        metric::create_histogram("thread_test_hist", "Thread test histogram",
                                 std::initializer_list<double>{0.1, 0.5, 1.0});

    std::vector<std::thread> threads;
    std::latch start_latch{num_threads};

    for (int i = 0; i < num_threads; ++i) {
      threads.emplace_back([&, i]() {
        auto hist = hist_family.create({{"thread_id", std::to_string(i)}});

        start_latch.arrive_and_wait();

        for (int j = 0; j < ops_per_thread; ++j) {
          hist.observe(static_cast<double>(j) / ops_per_thread);
        }
      });
    }

    for (auto &t : threads) {
      t.join();
    }
  }

  SUBCASE("concurrent render calls") {
    // Multiple threads calling render concurrently should be safe
    // (serialized by a mutex)
    auto counter_family = metric::create_counter("render_test", "Render test");
    auto counter = counter_family.create({});
    counter.inc(100);

    std::vector<std::thread> threads;
    std::latch start_latch{num_threads};
    std::atomic<int> success_count{0};

    for (int i = 0; i < num_threads; ++i) {
      threads.emplace_back([&]() {
        start_latch.arrive_and_wait();

        ArenaAllocator arena;
        auto output = metric::render(arena);
        if (output.size() > 0) {
          success_count.fetch_add(1);
        }
      });
    }

    for (auto &t : threads) {
      t.join();
    }

    CHECK(success_count.load() == num_threads);
  }
}

// Extracts the numeric sample value from a rendered series line such as
// `name{label="x"} 42`. Returns 0.0 if the line has no value separator.
static double parse_sample_value(std::string_view line) {
  auto space_pos = line.rfind(' ');
  if (space_pos == std::string_view::npos) {
    return 0.0;
  }
  auto value_str = line.substr(space_pos + 1);
  if (!value_str.empty() && value_str.back() == '\n') {
    value_str.remove_suffix(1);
  }
  return std::stod(std::string(value_str));
}

TEST_CASE("thread counter cleanup bug") {
  SUBCASE(
      "counter and histogram values should persist after thread destruction") {
    auto counter_family = metric::create_counter(
        "thread_cleanup_counter", "Counter for thread cleanup test");
    auto histogram_family = metric::create_histogram(
        "thread_cleanup_histogram", "Histogram for thread cleanup test",
        metric::linear_buckets(0.0, 1.0, 5)); // buckets: 0, 1, 2, 3, 4

    // Values observed from inside the worker thread
    double counter_value_in_thread = 0;
    double histogram_sum_in_thread = 0;

    // Create a thread that increments metrics and then exits
    std::thread worker([&]() {
      auto counter = counter_family.create({{"worker", "cleanup_test"}});
      auto histogram = histogram_family.create({{"worker", "cleanup_test"}});

      counter.inc(1.0);
      histogram.observe(1.5); // Should contribute to the sum

      // Measure the values from within the thread, before the ThreadInit
      // destructor runs
      ArenaAllocator thread_arena;
      auto thread_output = metric::render(thread_arena);

      for (const auto &line : thread_output) {
        if (line.find("thread_cleanup_counter{worker=\"cleanup_test\"}") !=
            std::string_view::npos) {
          counter_value_in_thread = parse_sample_value(line);
        }
        if (line.find(
                "thread_cleanup_histogram_sum{worker=\"cleanup_test\"}") !=
            std::string_view::npos) {
          histogram_sum_in_thread = parse_sample_value(line);
        }
      }
    });

    // Wait for the thread to complete and be destroyed (triggering the
    // ThreadInit destructor)
    worker.join();

    // Measure the values again after thread cleanup
    ArenaAllocator arena;
    auto output = metric::render(arena);

    double counter_value_after = 0;
    double histogram_sum_after = 0;

    for (const auto &line : output) {
      if (line.find("thread_cleanup_counter{worker=\"cleanup_test\"}") !=
          std::string_view::npos) {
        counter_value_after = parse_sample_value(line);
      }
      if (line.find("thread_cleanup_histogram_sum{worker=\"cleanup_test\"}") !=
          std::string_view::npos) {
        histogram_sum_after = parse_sample_value(line);
      }
    }

    // The values should have been captured correctly within the thread
    CHECK(counter_value_in_thread == 1.0);
    CHECK(histogram_sum_in_thread == 1.5);

    // The bug: these values should persist after thread cleanup, but they
    // are lost because ThreadInit::~ThreadInit() calls
    // family->perThreadState.erase(thread_id) without first accumulating the
    // per-thread values into global storage. Counter values therefore "go
    // backwards" when threads are destroyed, violating the monotonic
    // property of counters.
    CHECK(counter_value_after == 1.0);
    CHECK(histogram_sum_after == 1.5);
  }
}

TEST_CASE("error conditions") {
  SUBCASE("counter negative increment") {
    auto counter_family = metric::create_counter("error_counter", "Error test");
    auto counter = counter_family.create({});

    // Incrementing by a negative amount should abort in debug builds due to
    // validation; in release builds the behavior is undefined.
    // counter.inc(-1.0); // Would abort
  }

  SUBCASE("invalid metric names") {
    // These should abort due to name validation:
    // auto bad_counter = metric::create_counter("123invalid", "help");
    // auto bad_gauge = metric::create_gauge("invalid-name", "help");
  }

  SUBCASE("invalid label keys") {
    auto counter_family = metric::create_counter("valid_name", "help");

    // This should abort due to label validation:
    // auto counter = counter_family.create({{"123invalid", "value"}});
  }
}

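// The abort paths above cannot be exercised in-process with doctest, but the
// validation predicates that gate them are public, so the same inputs can be
// checked directly. This assumes create_counter/create are guarded by the
// is_valid_* functions tested at the top of this file.
TEST_CASE("validation predicates behind the abort paths (sketch)") {
  CHECK_FALSE(metric::is_valid_metric_name("123invalid"));
  CHECK_FALSE(metric::is_valid_metric_name("invalid-name"));
  CHECK_FALSE(metric::is_valid_label_key("123invalid"));
}
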
TEST_CASE("memory management") {
  SUBCASE("arena allocation in render") {
    ArenaAllocator arena;
    auto initial_used = arena.used_bytes();

    auto counter_family = metric::create_counter("memory_test", "Memory test");
    auto counter = counter_family.create(
        {{"large_label", "very_long_value_that_takes_space"}});
    counter.inc(42);

    auto output = metric::render(arena);
    auto final_used = arena.used_bytes();

    CHECK(output.size() > 0);
    CHECK(final_used > initial_used); // Arena was used for string allocation

    // Every rendered line should be non-empty (the string_views point into
    // arena memory)
    for (const auto &line : output) {
      CHECK(line.size() > 0);
    }
  }

  SUBCASE("arena reset behavior") {
    ArenaAllocator arena;

    auto counter_family = metric::create_counter("reset_test", "Reset test");
    auto counter = counter_family.create({});
    counter.inc(1);

    // Render multiple times with arena resets in between
    for (int i = 0; i < 5; ++i) {
      auto output = metric::render(arena);
      CHECK(output.size() > 0);
      arena.reset(); // Should not affect metric values, only arena memory
    }

    // A final render should still work
    auto final_output = metric::render(arena);
    CHECK(final_output.size() > 0);
  }
}

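// A probe of the reset contract suggested by the subcase above. Whether
// reset() returns used_bytes() to its pre-render level is an assumption about
// ArenaAllocator that this file does not state, so it is reported with WARN
// rather than asserted with CHECK.
TEST_CASE("arena reset reclaims render allocations (sketch)") {
  ArenaAllocator arena;
  auto baseline = arena.used_bytes();

  auto counter_family = metric::create_counter("reset_probe", "Reset probe");
  auto counter = counter_family.create({});
  counter.inc(1);

  auto output = metric::render(arena);
  CHECK(output.size() > 0);
  CHECK(arena.used_bytes() > baseline);

  arena.reset();
  WARN(arena.used_bytes() == baseline); // Assumed, not guaranteed, behavior
}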