#include "metric.hpp"
|
|
|
|
#include <algorithm>
|
|
#include <atomic>
|
|
#include <bit>
|
|
#include <cassert>
|
|
#include <cctype>
|
|
#include <cmath>
|
|
#include <cstdint>
|
|
#include <cstdio>
|
|
#include <cstdlib>
|
|
#include <cstring>
|
|
#include <functional>
|
|
#include <map>
|
|
#include <memory>
|
|
#include <mutex>
|
|
#include <set>
|
|
#include <string>
|
|
#include <thread>
|
|
#include <type_traits>
|
|
#include <unordered_map>
|
|
#include <unordered_set>
|
|
#include <vector>
|
|
|
|
#include <immintrin.h>
|
|
#include <simdutf.h>
|
|
|
|
#include "arena.hpp"
|
|
#include "format.hpp"
|
|
|
|
// WeaselDB Metrics System Design:
//
// THREADING MODEL:
// - Counters: Per-thread storage, single writer, atomic write/read
//   coordination with render thread
// - Histograms: Per-thread storage, single writer, mutex protection for all
//   access (both observe and render)
// - Gauges: Global storage with atomic CAS operations (multi-writer, no mutex
//   needed)
//
// SYNCHRONIZATION STRATEGY:
// - Counters: Atomic store in Counter::inc(), atomic load in render thread
// - Histograms: Mutex serializes all access - updates in observe(), reads in
//   render
// - Gauges: Lock-free atomic operations for all updates and reads
//
// PRECISION STRATEGY:
// - Use atomic<uint64_t> for lock-free storage
// - Store doubles using std::bit_cast to uint64_t (preserves full IEEE 754
//   precision)
// - Single writer for counters enables simple atomic store/load
//
// MEMORY MODEL:
// - Thread-local metrics auto-cleanup on thread destruction
// - Global metrics (gauges) persist for application lifetime
// - Histogram buckets are sorted, deduplicated, sizes never change after
//   creation

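// Example (illustrative sketch of the precision strategy above): a double is
// round-tripped through an atomic<uint64_t> without losing any bits.
//
//   std::atomic<uint64_t> cell{0};
//   cell.store(std::bit_cast<uint64_t>(3.141592653589793),
//              std::memory_order_relaxed);             // writer
//   double d = std::bit_cast<double>(
//       cell.load(std::memory_order_relaxed));         // reader
//   // d compares bit-for-bit equal to the stored value.
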
namespace metric {

// ARENA OWNERSHIP AND MEMORY MANAGEMENT DOCUMENTATION
//
// The metrics system uses multiple arena allocators with distinct ownership
// patterns:
//
// 1. GLOBAL ARENA (get_global_arena()):
//    - Lifetime: Application lifetime (never destroyed)
//    - Purpose: Persistent storage for metric families, interned labels, and
//      global state
//    - Owner: Static storage - automatically managed
//    - Content: Family names, help text, LabelsKey instances, global
//      accumulated values
//
// 2. THREAD-LOCAL ARENA (get_thread_local_arena()):
//    - Lifetime: Per-thread lifetime (destroyed on thread exit)
//    - Purpose: Storage for per-thread metric instances (Counter::State,
//      Histogram::State)
//    - Owner: thread_local ThreadInit instance
//    - Content: Thread-specific metric instance state
//
// 3. TEMPORARY ARENAS:
//    a) Caller-Provided Arenas (Arena& parameters):
//       - Lifetime: Controlled by caller (function parameter)
//       - Purpose: Output formatting where caller controls result lifetime
//       - Owner: Caller owns arena and controls string lifetime
//       - Example: render(Arena& arena) - caller manages arena lifecycle
//
//    b) Stack-Owned Temporary Arenas:
//       - Lifetime: Function/scope lifetime (automatic destruction)
//       - Purpose: Internal temporary allocations for lookups and processing
//       - Owner: Function owns arena on stack, destroyed at scope exit
//       - Example: intern_labels() creates Arena lookup_arena(1024)
//
// CRITICAL OWNERSHIP RULES:
//
// - LabelsKey Arena Dependency: LabelsKey instances store string_views
//   pointing to arena-allocated memory. The arena MUST outlive any LabelsKey
//   that references its memory. The LabelsKey constructor copies input
//   strings into the provided arena.
//
// - Render Function: render(arena) allocates ALL output strings in the
//   provided arena. Callers own the arena and control string lifetime.
//   String_views become invalid after arena.reset() or arena destruction.
//
// - Thread Cleanup: The ThreadInit destructor accumulates thread-local
//   counter/histogram values into global storage before thread exit. This
//   copies VALUES (not ownership) to prevent metric data loss when threads
//   are destroyed.
//
// - Family Creation: Uses placement new in the global arena without explicit
//   destructors. This is acceptable because Family::State instances persist
//   for the application lifetime and the global arena is never destroyed.

// Validation helper that works in both debug and release builds
|
|
static void validate_or_abort(bool condition, const char *message,
|
|
std::string_view value) {
|
|
if (!condition) {
|
|
std::fprintf(stderr, "WeaselDB metric validation failed: %s: '%.*s'\n",
|
|
message, static_cast<int>(value.size()), value.data());
|
|
std::abort();
|
|
}
|
|
}
|
|
|
|
// Arena-based labels key for the second level of the map.
// Holds a string_view containing the labels in Prometheus text format.
struct LabelsKey {
  std::string_view prometheus_format;

  // Arena-owning constructor (copies strings into the arena and formats them
  // as Prometheus text)
  LabelsKey(std::span<const std::pair<std::string_view, std::string_view>> l,
            Arena &arena) {
    // Copy and validate all label keys and values, sort by key
    ArenaVector<std::pair<std::string_view, std::string_view>> labels(&arena);
    for (const auto &[key, value] : l) {
      validate_or_abort(is_valid_label_key(key), "invalid label key", key);
      validate_or_abort(is_valid_label_value(value), "invalid label value",
                        value);

      auto key_view = arena.copy_string(key);
      auto value_view = arena.copy_string(value);
      labels.push_back({key_view, value_view});
    }

    // Sort labels by key for Prometheus compatibility
    std::sort(labels.data(), labels.data() + labels.size(),
              [](const auto &a, const auto &b) { return a.first < b.first; });

    // Generate Prometheus text format: {key1="value1",key2="value2"}
    if (labels.empty()) {
      prometheus_format = "";
    } else {
      // Calculate the required size for the formatted string
      size_t required_size = 2; // {}
      for (const auto &[key, value] : labels) {
        required_size += key.length() + 3 + value.length(); // key="value"
        for (char c : value) {
          if (c == '\\' || c == '"' || c == '\n') {
            required_size++; // each escaped char gains a backslash
          }
        }
      }
      required_size += labels.size() - 1; // commas

      // Generate the formatted string in the arena
      char *buf = arena.allocate<char>(required_size);
      char *p = buf;

      *p++ = '{';
      for (size_t i = 0; i < labels.size(); ++i) {
        if (i > 0)
          *p++ = ',';
        std::memcpy(p, labels[i].first.data(), labels[i].first.length());
        p += labels[i].first.length();
        *p++ = '=';
        *p++ = '"';
        for (char c : labels[i].second) {
          switch (c) {
          case '\\':
            *p++ = '\\';
            *p++ = '\\';
            break;
          case '"':
            *p++ = '\\';
            *p++ = '"';
            break;
          case '\n':
            *p++ = '\\';
            *p++ = 'n';
            break;
          default:
            *p++ = c;
            break;
          }
        }
        *p++ = '"';
      }
      *p++ = '}';
      prometheus_format = std::string_view(buf, p - buf);
    }
  }

  bool operator==(const LabelsKey &other) const {
    return prometheus_format == other.prometheus_format;
  }

  bool operator<(const LabelsKey &other) const {
    return prometheus_format < other.prometheus_format;
  }
};

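// Formatting example (illustrative): given labels {"method", "GET"} and
// {"path", "a\"b"}, the constructor produces, after sorting by key,
//   {method="GET",path="a\"b"}
// i.e. values are escaped for backslash, double quote, and newline.
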
} // namespace metric

namespace std {
template <> struct hash<metric::LabelsKey> {
  std::size_t operator()(const metric::LabelsKey &k) const {
    return std::hash<std::string_view>{}(k.prometheus_format);
  }
};
} // namespace std

namespace metric {

// DESIGN: Store doubles in atomic<uint64_t> for lock-free operations
// - Preserves full IEEE 754 double precision (no truncation)
// - Allows atomic load/store without locks
// - Use std::bit_cast for safe conversion between double and uint64_t

// Family::State structures own the second-level maps (labels -> instances)
template <> struct Family<Counter>::State {
  std::string_view name;
  std::string_view help;

  struct PerThreadState {
    std::unordered_map<
        LabelsKey, Counter::State *, std::hash<LabelsKey>,
        std::equal_to<LabelsKey>,
        ArenaStlAllocator<std::pair<const LabelsKey, Counter::State *>>>
        instances;

    explicit PerThreadState(Arena &arena)
        : instances(
              ArenaStlAllocator<std::pair<const LabelsKey, Counter::State *>>(
                  &arena)) {}
  };
  std::unordered_map<std::thread::id, PerThreadState> per_thread_state;

  // Global accumulation state for destroyed threads
  std::unordered_map<
      LabelsKey, Counter::State *, std::hash<LabelsKey>,
      std::equal_to<LabelsKey>,
      ArenaStlAllocator<std::pair<const LabelsKey, Counter::State *>>>
      global_accumulated_values;

  // Callback-based metrics (global, not per-thread)
  std::map<
      LabelsKey, MetricCallback<Counter>, std::less<LabelsKey>,
      ArenaStlAllocator<std::pair<const LabelsKey, MetricCallback<Counter>>>>
      callbacks;

  State(Arena &arena)
      : global_accumulated_values(
            ArenaStlAllocator<std::pair<const LabelsKey, Counter::State *>>(
                &arena)),
        callbacks(
            ArenaStlAllocator<
                std::pair<const LabelsKey, MetricCallback<Counter>>>(&arena)) {}
};

template <> struct Family<Gauge>::State {
  std::string_view name;
  std::string_view help;
  std::unordered_map<
      LabelsKey, Gauge::State *, std::hash<LabelsKey>, std::equal_to<LabelsKey>,
      ArenaStlAllocator<std::pair<const LabelsKey, Gauge::State *>>>
      instances;

  // Callback-based metrics
  std::map<LabelsKey, MetricCallback<Gauge>, std::less<LabelsKey>,
           ArenaStlAllocator<std::pair<const LabelsKey, MetricCallback<Gauge>>>>
      callbacks;

  State(Arena &arena)
      : instances(ArenaStlAllocator<std::pair<const LabelsKey, Gauge::State *>>(
            &arena)),
        callbacks(ArenaStlAllocator<
                  std::pair<const LabelsKey, MetricCallback<Gauge>>>(&arena)) {}
};

template <> struct Family<Histogram>::State {
  std::string_view name;
  std::string_view help;
  ArenaVector<double> buckets;

  struct PerThreadState {
    std::unordered_map<
        LabelsKey, Histogram::State *, std::hash<LabelsKey>,
        std::equal_to<LabelsKey>,
        ArenaStlAllocator<std::pair<const LabelsKey, Histogram::State *>>>
        instances;

    explicit PerThreadState(Arena &arena)
        : instances(
              ArenaStlAllocator<std::pair<const LabelsKey, Histogram::State *>>(
                  &arena)) {}
  };
  std::unordered_map<std::thread::id, PerThreadState> per_thread_state;

  // Global accumulation state for destroyed threads
  std::unordered_map<
      LabelsKey, Histogram::State *, std::hash<LabelsKey>,
      std::equal_to<LabelsKey>,
      ArenaStlAllocator<std::pair<const LabelsKey, Histogram::State *>>>
      global_accumulated_values;

  State(Arena &arena)
      : buckets(&arena),
        global_accumulated_values(
            ArenaStlAllocator<std::pair<const LabelsKey, Histogram::State *>>(
                &arena)) {}

  // Note: No callbacks map - histograms don't support callback-based metrics
};

// Counter: Thread-local, monotonically increasing, single writer
struct Counter::State {
  double value; // Single writer, atomic coordination with render thread
  friend struct Metric;
};

// Gauge: Global, can increase/decrease, multiple writers (uses atomic CAS)
struct Gauge::State {
  std::atomic<uint64_t>
      value; // Stores double as uint64_t bits, lock-free CAS operations
  friend struct Metric;
};

// Histogram: Thread-local buckets, single writer, mutex protection per thread,
// per histogram
struct Histogram::State {
  std::span<const double> thresholds; // Bucket boundaries (sorted,
                                      // deduplicated, sizes never change)
  std::span<uint64_t> counts;         // Count per bucket
  double sum;                         // Sum of observations
  uint64_t observations;              // Total observation count
  std::mutex
      mutex; // Per-thread, per-histogram mutex for consistent reads/writes

  State() : sum(0.0), observations(0) {}
  friend struct Metric;
};

struct Metric {
  // We keep raw pointers to these in maps, so we never run their destructors
  static_assert(std::is_trivially_destructible_v<Counter::State>);
  static_assert(std::is_trivially_destructible_v<Gauge::State>);
  static_assert(std::is_trivially_destructible_v<Histogram::State>);
  static std::mutex mutex;

  // Global arena allocator for metric families and persistent global state
  static Arena &get_global_arena() {
    static auto *global_arena = new Arena(64 * 1024); // 64 KiB initial size
    return *global_arena;
  }

  // Function-local statics to avoid the static initialization order fiasco
  static auto &get_counter_families() {
    using FamilyMap = std::map<
        std::string_view, Arena::Ptr<Family<Counter>::State>,
        std::less<std::string_view>,
        ArenaStlAllocator<std::pair<const std::string_view,
                                    Arena::Ptr<Family<Counter>::State>>>>;
    static FamilyMap *counterFamilies = new FamilyMap(
        ArenaStlAllocator<std::pair<const std::string_view,
                                    Arena::Ptr<Family<Counter>::State>>>(
            &get_global_arena()));
    return *counterFamilies;
  }

  static auto &get_gauge_families() {
    using FamilyMap = std::map<
        std::string_view, Arena::Ptr<Family<Gauge>::State>,
        std::less<std::string_view>,
        ArenaStlAllocator<std::pair<const std::string_view,
                                    Arena::Ptr<Family<Gauge>::State>>>>;
    static FamilyMap *gaugeFamilies = new FamilyMap(
        ArenaStlAllocator<std::pair<const std::string_view,
                                    Arena::Ptr<Family<Gauge>::State>>>(
            &get_global_arena()));
    return *gaugeFamilies;
  }

  static auto &get_histogram_families() {
    using FamilyMap = std::map<
        std::string_view, Arena::Ptr<Family<Histogram>::State>,
        std::less<std::string_view>,
        ArenaStlAllocator<std::pair<const std::string_view,
                                    Arena::Ptr<Family<Histogram>::State>>>>;
    static FamilyMap *histogramFamilies = new FamilyMap(
        ArenaStlAllocator<std::pair<const std::string_view,
                                    Arena::Ptr<Family<Histogram>::State>>>(
            &get_global_arena()));
    return *histogramFamilies;
  }

  // Global label interning set to avoid duplicate LabelsKey allocations
  static auto &get_interned_labels() {
    using InternSet = std::unordered_set<LabelsKey, std::hash<LabelsKey>,
                                         std::equal_to<LabelsKey>,
                                         ArenaStlAllocator<LabelsKey>>;
    static InternSet *internedLabels =
        new InternSet(ArenaStlAllocator<LabelsKey>(&get_global_arena()));
    return *internedLabels;
  }

  // Global static text interning set to avoid duplicate text allocations
  static auto &get_interned_static_text() {
    using StaticTextSet =
        std::unordered_set<std::string_view, std::hash<std::string_view>,
                           std::equal_to<std::string_view>,
                           ArenaStlAllocator<std::string_view>>;
    static StaticTextSet *internedStaticText = new StaticTextSet(
        ArenaStlAllocator<std::string_view>(&get_global_arena()));
    return *internedStaticText;
  }

  // Registry of all thread arenas for memory tracking
  static auto &get_thread_arenas() {
    using ThreadArenaMap = std::unordered_map<std::thread::id, Arena *>;
    static ThreadArenaMap *threadArenas = new ThreadArenaMap();
    return *threadArenas;
  }

  static auto &get_collectors() {
    using CollectorRegistry = std::vector<Ref<Collector>>;
    static CollectorRegistry *collectors = new CollectorRegistry();
    return *collectors;
  }

  // Thread cleanup for per-family thread-local storage
  struct ThreadInit {
    Arena arena;
    ThreadInit() {
      // Register this thread's arena for memory tracking
      std::unique_lock _{mutex};
      get_thread_arenas()[std::this_thread::get_id()] = &arena;
    }
    ~ThreadInit() {
      // Accumulate thread-local state into global state before cleanup.
      // THREAD SAFETY: All operations below are protected by the global mutex,
      // including writes to global accumulated state, preventing races with
      // the render thread.
      std::unique_lock _{mutex};
      // NOTE: the registration_version increment is REQUIRED here because:
      // - The cached render plan has pre-resolved pointers to thread-local
      //   state
      // - When threads disappear, these pointers become invalid
      // - Cache invalidation forces a rebuild with updated pointer sets
      ++Metric::registration_version;
      auto thread_id = std::this_thread::get_id();

      // Unregister this thread's arena from memory tracking
      get_thread_arenas().erase(thread_id);

      // Accumulate counter families
      for (auto &[name, family] : Metric::get_counter_families()) {
        auto thread_it = family->per_thread_state.find(thread_id);
        if (thread_it != family->per_thread_state.end()) {
          for (auto &[labels_key, instance] : thread_it->second.instances) {
            // Get the current thread-local value
            double current_value = instance->value;

            // Ensure the global accumulator exists
            auto &global_state = family->global_accumulated_values[labels_key];
            if (!global_state) {
              global_state = get_global_arena().construct<Counter::State>();
              global_state->value = 0.0;
            }

            // Add the thread-local value to the global accumulator (mutex
            // already held)
            global_state->value += current_value;
          }
          family->per_thread_state.erase(thread_it);
        }
      }

      // Accumulate histogram families
      for (auto &[name, family] : Metric::get_histogram_families()) {
        auto thread_it = family->per_thread_state.find(thread_id);
        if (thread_it != family->per_thread_state.end()) {
          for (auto &[labels_key, instance] : thread_it->second.instances) {
            // Acquire the instance lock to get a consistent snapshot
            std::lock_guard lock(instance->mutex);

            // The global accumulator was created when the histogram was made
            auto &global_state = family->global_accumulated_values[labels_key];
            assert(global_state);

            // Accumulate bucket counts (mutex already held)
            for (size_t i = 0; i < instance->counts.size(); ++i) {
              global_state->counts[i] += instance->counts[i];
            }

            // Accumulate sum and observations
            global_state->sum += instance->sum;
            global_state->observations += instance->observations;
          }
          family->per_thread_state.erase(thread_it);
        }
      }

      // Gauges are global, so no per-thread cleanup is needed
    }
  };
  static thread_local ThreadInit thread_init;

  // Thread-local arena allocator for metric instances
  static Arena &get_thread_local_arena() { return thread_init.arena; }

  // Thread cleanup is handled by ThreadInit RAII

  // Intern labels to avoid duplicate arena allocations
  static const LabelsKey &intern_labels(
      std::span<const std::pair<std::string_view, std::string_view>> labels) {
    auto &interned_set = get_interned_labels();

    // MEMORY EFFICIENCY PATTERN:
    // Use a temporary stack-allocated arena for lookup operations to avoid
    // unbounded memory growth in the global arena. Only allocate in the
    // global arena for genuinely new label combinations.
    //
    // SAFETY: This pattern is safe because:
    // 1. std::unordered_set::find() uses lookup_key transiently for
    //    hashing/comparison
    // 2. Hash set implementations don't retain references to lookup keys
    //    after find()
    // 3. lookup_arena is destroyed at scope exit, but its memory is never
    //    used afterwards
    // 4. All returned references point to global arena memory (application
    //    lifetime)

    // Create a temporary lookup key using a stack-allocated arena
    Arena lookup_arena(1024); // Small arena for lookups only
    LabelsKey lookup_key{labels, lookup_arena};

    // Standard hash set lookup - lookup_key memory is used transiently only
    auto it = interned_set.find(lookup_key);
    if (it != interned_set.end()) {
      // Found: return a reference to the permanently stored key in the
      // global arena
      return *it;
    }

    // Not found - create and intern a new key in the global arena.
    // This is the ONLY place where global arena memory is consumed for labels.
    LabelsKey new_key{labels, get_global_arena()};
    auto result = interned_set.emplace(std::move(new_key));
    return *result.first;
  }

  // Intern static text to avoid duplicate allocations
  static std::string_view intern_static_text(std::string_view text) {
    auto &interned_set = get_interned_static_text();

    // Check whether the text is already interned
    auto it = interned_set.find(text);
    if (it != interned_set.end()) {
      return *it;
    }

    // Not found - copy to the global arena and intern
    auto interned_text = get_global_arena().copy_string(text);
    auto result = interned_set.emplace(interned_text);
    return *result.first;
  }

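  // Interning example (illustrative): two lookups with equal label sets yield
  // the same stored key, so repeated create() calls do not grow the global
  // arena:
  //
  //   const LabelsKey &a = intern_labels(labels); // first call: allocates
  //   const LabelsKey &b = intern_labels(labels); // cache hit: &a == &b
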
  static Counter create_counter_instance(
      Family<Counter> *family,
      std::span<const std::pair<std::string_view, std::string_view>> labels) {
    // Force thread_local initialization
    (void)thread_init;

    std::unique_lock _{mutex};
    ++Metric::registration_version;
    const LabelsKey &key = intern_labels(labels);

    // Validate that the labels aren't already registered as a callback
    validate_or_abort(
        family->p->callbacks.find(key) == family->p->callbacks.end(),
        "labels already registered as callback",
        key.prometheus_format.empty() ? "(no labels)" : key.prometheus_format);

    // Ensure thread state exists
    auto thread_id = std::this_thread::get_id();
    auto per_thread_it = family->p->per_thread_state.find(thread_id);
    if (per_thread_it == family->p->per_thread_state.end()) {
      // Create a new PerThreadState backed by the thread-local arena
      auto result = family->p->per_thread_state.emplace(
          thread_id,
          Family<Counter>::State::PerThreadState(get_thread_local_arena()));
      per_thread_it = result.first;
    }

    auto &ptr = per_thread_it->second.instances[key];
    if (!ptr) {
      ptr = get_thread_local_arena().construct<Counter::State>();
      ptr->value = 0.0;

      // Ensure a global accumulator exists for this label set
      auto &global_state = family->p->global_accumulated_values[key];
      if (!global_state) {
        global_state = get_global_arena().construct<Counter::State>();
        global_state->value = 0.0;
      }
    }
    Counter result;
    result.p = ptr;
    return result;
  }

  static Gauge create_gauge_instance(
      Family<Gauge> *family,
      std::span<const std::pair<std::string_view, std::string_view>> labels) {
    std::unique_lock _{mutex};
    ++Metric::registration_version;
    const LabelsKey &key = intern_labels(labels);

    // Validate that the labels aren't already registered as a callback
    validate_or_abort(
        family->p->callbacks.find(key) == family->p->callbacks.end(),
        "labels already registered as callback",
        key.prometheus_format.empty() ? "(no labels)" : key.prometheus_format);

    auto &ptr = family->p->instances[key];
    if (!ptr) {
      ptr = get_global_arena().construct<Gauge::State>();
      ptr->value.store(0, std::memory_order_relaxed);
    }
    Gauge result;
    result.p = ptr;
    return result;
  }

  static Histogram create_histogram_instance(
      Family<Histogram> *family,
      std::span<const std::pair<std::string_view, std::string_view>> labels) {
    // Force thread_local initialization
    (void)thread_init;

    std::unique_lock _{mutex};
    ++Metric::registration_version;
    const LabelsKey &key = intern_labels(labels);

    // Ensure thread state exists
    auto thread_id = std::this_thread::get_id();
    auto per_thread_it = family->p->per_thread_state.find(thread_id);
    if (per_thread_it == family->p->per_thread_state.end()) {
      // Create a new PerThreadState backed by the thread-local arena
      auto result = family->p->per_thread_state.emplace(
          thread_id,
          Family<Histogram>::State::PerThreadState(get_thread_local_arena()));
      per_thread_it = result.first;
    }

    auto &ptr = per_thread_it->second.instances[key];
    if (!ptr) {
      ptr = get_thread_local_arena().construct<Histogram::State>();

      // DESIGN: Prometheus-compatible histogram buckets.
      // Use the buckets from the family configuration.
      size_t bucket_count = family->p->buckets.size();
      double *thresholds_data =
          get_thread_local_arena().allocate<double>(bucket_count);
      uint64_t *counts_data =
          get_thread_local_arena().allocate<uint64_t>(bucket_count);

      // Copy thresholds and zero-initialize counts
      std::memcpy(thresholds_data, family->p->buckets.data(),
                  bucket_count * sizeof(double));
      std::memset(counts_data, 0, bucket_count * sizeof(uint64_t));

      ptr->thresholds = std::span<const double>(thresholds_data, bucket_count);
      ptr->counts = std::span<uint64_t>(counts_data, bucket_count);

      // Ensure a global accumulator exists for this label set
      auto &global_state = family->p->global_accumulated_values[key];
      if (!global_state) {
        global_state = get_global_arena().construct<Histogram::State>();

        // Allocate and copy thresholds, zero-initialize counts
        double *global_thresholds_data =
            get_global_arena().allocate<double>(bucket_count);
        uint64_t *global_counts_data =
            get_global_arena().allocate<uint64_t>(bucket_count);

        std::memcpy(global_thresholds_data, ptr->thresholds.data(),
                    bucket_count * sizeof(double));
        std::memset(global_counts_data, 0, bucket_count * sizeof(uint64_t));

        global_state->thresholds =
            std::span<const double>(global_thresholds_data, bucket_count);
        global_state->counts =
            std::span<uint64_t>(global_counts_data, bucket_count);
      }
    }
    Histogram result;
    result.p = ptr;
    return result;
  }

  // Pre-computed data structures with resolved pointers to eliminate hash
  // lookups
  struct CounterLabelData {
    LabelsKey labels_key;
    ArenaVector<Counter::State *> thread_states; // Pre-resolved pointers
    Counter::State *global_state; // Pre-resolved global state pointer

    CounterLabelData(const LabelsKey &key, Arena &arena)
        : labels_key(key), thread_states(&arena), global_state(nullptr) {}
  };

  struct GaugeLabelData {
    LabelsKey labels_key;
    Gauge::State *instance_state; // Direct pointer to the gauge instance

    GaugeLabelData(const LabelsKey &key)
        : labels_key(key), instance_state(nullptr) {}
  };

  struct HistogramLabelData {
    LabelsKey labels_key;
    ArenaVector<Histogram::State *> thread_states; // Pre-resolved pointers
    Histogram::State *global_state; // Pre-resolved global state pointer
    size_t bucket_count;            // Cached bucket count from the family

    HistogramLabelData(const LabelsKey &key, Arena &arena)
        : labels_key(key), thread_states(&arena), global_state(nullptr),
          bucket_count(0) {}
  };

  // Pre-computed data for each family type, built once and reused
  struct LabelSets {
    ArenaVector<ArenaVector<CounterLabelData>> counter_data;
    ArenaVector<ArenaVector<GaugeLabelData>> gauge_data;
    ArenaVector<ArenaVector<HistogramLabelData>> histogram_data;
    explicit LabelSets(Arena &arena)
        : counter_data(&arena), gauge_data(&arena), histogram_data(&arena) {}
  };

  // Instruction types for the execute phase
  struct CallCounterCallback {
    const MetricCallback<Counter>
        *callback_ptr; // Safe: callback lifetime guaranteed by the family map
  };

  struct CallGaugeCallback {
    const MetricCallback<Gauge>
        *callback_ptr; // Safe: callback lifetime guaranteed by the family map
  };

  struct AggregateCounter {
    std::span<Counter::State *const> thread_states;
    Counter::State *global_state;
  };

  struct AggregateGauge {
    Gauge::State *instance_state;
  };

  struct AggregateHistogram {
    std::span<Histogram::State *const> thread_states;
    Histogram::State *global_state;
    size_t bucket_count;
    std::span<const double> buckets; // For bucket threshold formatting
  };

  // Use a simple enum-based tagged union to avoid std::variant issues
  enum class InstructionType {
    CALL_COUNTER_CALLBACK,
    CALL_GAUGE_CALLBACK,
    AGGREGATE_COUNTER,
    AGGREGATE_GAUGE,
    AGGREGATE_HISTOGRAM
  };

  struct RenderInstruction {
    InstructionType type;
    union {
      CallCounterCallback counter_callback;
      CallGaugeCallback gauge_callback;
      AggregateCounter aggregate_counter;
      AggregateGauge aggregate_gauge;
      AggregateHistogram aggregate_histogram;
    };

    // Constructors
    RenderInstruction(CallCounterCallback cb)
        : type(InstructionType::CALL_COUNTER_CALLBACK) {
      new (&counter_callback) CallCounterCallback(cb);
    }
    RenderInstruction(CallGaugeCallback cb)
        : type(InstructionType::CALL_GAUGE_CALLBACK) {
      new (&gauge_callback) CallGaugeCallback(cb);
    }
    RenderInstruction(AggregateCounter ac)
        : type(InstructionType::AGGREGATE_COUNTER) {
      new (&aggregate_counter) AggregateCounter(ac);
    }
    RenderInstruction(AggregateGauge ag)
        : type(InstructionType::AGGREGATE_GAUGE) {
      new (&aggregate_gauge) AggregateGauge(ag);
    }
    RenderInstruction(AggregateHistogram ah)
        : type(InstructionType::AGGREGATE_HISTOGRAM) {
      new (&aggregate_histogram) AggregateHistogram(ah);
    }

    // No destructor needed; all alternatives are trivially destructible
    static_assert(std::is_trivially_destructible_v<CallCounterCallback>);
    static_assert(std::is_trivially_destructible_v<CallGaugeCallback>);
    static_assert(std::is_trivially_destructible_v<AggregateCounter>);
    static_assert(std::is_trivially_destructible_v<AggregateGauge>);
    static_assert(std::is_trivially_destructible_v<AggregateHistogram>);
  };

  // Three-phase rendering system
  struct RenderPlan {
    Arena arena;
    ArenaVector<std::string_view> static_text{&arena};
    ArenaVector<RenderInstruction> instructions{&arena};
    uint64_t registration_version;
  };

  // Used to invalidate the render plan cache
  static uint64_t registration_version;
  static std::unique_ptr<RenderPlan> cached_plan;

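  // Plan layout example (illustrative sketch): for a counter family with one
  // labeled instance, the compile phase emits one static prefix such as
  //   "# HELP <name> <help>\n# TYPE <name> counter\n<name>{...} "
  // paired with one AggregateCounter instruction. The execute phase produces
  // the matching value string, and the present phase interleaves
  // static_text[i] + dynamic_text[i] for all i, ending with "\n".
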
  // Phase 1: Compile phase - generate static text and instructions.
  // The plan is heap-allocated up front so that its arena-backed vectors
  // never observe a moved Arena (their stored Arena* would otherwise dangle
  // after moving a stack-local plan into the returned unique_ptr).
  static std::unique_ptr<RenderPlan> compile_render_plan() {
    auto plan = std::make_unique<RenderPlan>();
    plan->registration_version = registration_version;

    Metric::LabelSets label_sets = Metric::build_label_sets(plan->arena);

    // Use a temporary arena for formatting static text (it will be interned
    // into the global arena)
    Arena temp_arena(8192); // 8 KiB for temporary formatting

    // Helper that appends an additional label to an existing Prometheus
    // format string
    auto append_label_to_format =
        [&](std::string_view base_format, std::string_view key,
            std::string_view value) -> std::string_view {
      // Calculate the size of key="value" with escaping
      size_t key_value_size = key.length() + 3 + value.length(); // key="value"
      for (char c : value) {
        if (c == '\\' || c == '"' || c == '\n') {
          key_value_size++;
        }
      }

      if (base_format.empty()) {
        // Create a new format: {key="value"}
        size_t required_size = 2 + key_value_size; // {}
        char *buf = temp_arena.allocate<char>(required_size);
        char *p = buf;
        *p++ = '{';
        std::memcpy(p, key.data(), key.length());
        p += key.length();
        *p++ = '=';
        *p++ = '"';
        for (char c : value) {
          switch (c) {
          case '\\':
            *p++ = '\\';
            *p++ = '\\';
            break;
          case '"':
            *p++ = '\\';
            *p++ = '"';
            break;
          case '\n':
            *p++ = '\\';
            *p++ = 'n';
            break;
          default:
            *p++ = c;
            break;
          }
        }
        *p++ = '"';
        *p++ = '}';
        return std::string_view(buf, p - buf);
      } else {
        // Append to the existing format: {existing,key="value"}
        size_t required_size = base_format.length() + 1 +
                               key_value_size; // comma + key="value", reuse }
        char *buf = temp_arena.allocate<char>(required_size);
        char *p = buf;
        // Copy everything except the closing }
        std::memcpy(p, base_format.data(), base_format.length() - 1);
        p += base_format.length() - 1;
        *p++ = ',';
        std::memcpy(p, key.data(), key.length());
        p += key.length();
        *p++ = '=';
        *p++ = '"';
        for (char c : value) {
          switch (c) {
          case '\\':
            *p++ = '\\';
            *p++ = '\\';
            break;
          case '"':
            *p++ = '\\';
            *p++ = '"';
            break;
          case '\n':
            *p++ = '\\';
            *p++ = 'n';
            break;
          default:
            *p++ = c;
            break;
          }
        }
        *p++ = '"';
        *p++ = '}';
        return std::string_view(buf, p - buf);
      }
    };

    // Track whether this is the first static text entry (no leading newline)
    bool is_first_static = true;

    // Generate counters
    size_t counter_family_idx = 0;
    for (const auto &[name, family] : get_counter_families()) {
      // Add the HELP and TYPE lines
      auto help_line = format(
          temp_arena, "%s# HELP %.*s %.*s\n# TYPE %.*s counter",
          is_first_static ? "" : "\n", static_cast<int>(name.length()),
          name.data(), static_cast<int>(family->help.length()),
          family->help.data(), static_cast<int>(name.length()), name.data());
      is_first_static = false;

      // Callback instructions and static text
      for (const auto &[labels_key, callback] : family->callbacks) {
        plan->instructions.push_back(CallCounterCallback{&callback});
        auto static_text = format(
            temp_arena, "%.*s\n%.*s%.*s ", static_cast<int>(help_line.size()),
            help_line.data(), static_cast<int>(name.length()), name.data(),
            static_cast<int>(labels_key.prometheus_format.length()),
            labels_key.prometheus_format.data());
        plan->static_text.push_back(intern_static_text(static_text));
        help_line = "";
      }

      // Instance instructions and static text
      const auto &family_data = label_sets.counter_data[counter_family_idx++];
      for (const auto &data : family_data) {
        plan->instructions.push_back(
            AggregateCounter{data.thread_states, data.global_state});
        auto static_text = format(
            temp_arena, "%.*s\n%.*s%.*s ", static_cast<int>(help_line.size()),
            help_line.data(), static_cast<int>(name.length()), name.data(),
            static_cast<int>(data.labels_key.prometheus_format.length()),
            data.labels_key.prometheus_format.data());
        plan->static_text.push_back(intern_static_text(static_text));
        help_line = "";
      }
    }

    // Generate gauges
    size_t gauge_family_idx = 0;
    for (const auto &[name, family] : get_gauge_families()) {
      // Add the HELP and TYPE lines
      auto help_line = format(
          temp_arena, "%s# HELP %.*s %.*s\n# TYPE %.*s gauge",
          is_first_static ? "" : "\n", static_cast<int>(name.length()),
          name.data(), static_cast<int>(family->help.length()),
          family->help.data(), static_cast<int>(name.length()), name.data());
      is_first_static = false;

      // Callback instructions and static text
      for (const auto &[labels_key, callback] : family->callbacks) {
        plan->instructions.push_back(CallGaugeCallback{&callback});
        auto static_text = format(
            temp_arena, "%.*s\n%.*s%.*s ", static_cast<int>(help_line.size()),
            help_line.data(), static_cast<int>(name.length()), name.data(),
            static_cast<int>(labels_key.prometheus_format.length()),
            labels_key.prometheus_format.data());
        plan->static_text.push_back(intern_static_text(static_text));
        help_line = "";
      }

      // Instance instructions and static text
      const auto &family_data = label_sets.gauge_data[gauge_family_idx++];
      for (const auto &data : family_data) {
        plan->instructions.push_back(AggregateGauge{data.instance_state});
        auto static_text = format(
            temp_arena, "%.*s\n%.*s%.*s ", static_cast<int>(help_line.size()),
            help_line.data(), static_cast<int>(name.length()), name.data(),
            static_cast<int>(data.labels_key.prometheus_format.length()),
            data.labels_key.prometheus_format.data());
        plan->static_text.push_back(intern_static_text(static_text));
        help_line = "";
      }
    }

    // Generate histograms
    size_t histogram_family_idx = 0;
    for (const auto &[name, family] : get_histogram_families()) {
      auto help_line = format(
          temp_arena, "%s# HELP %.*s %.*s\n# TYPE %.*s histogram",
          is_first_static ? "" : "\n", static_cast<int>(name.length()),
          name.data(), static_cast<int>(family->help.length()),
          family->help.data(), static_cast<int>(name.length()), name.data());
      // Reset the flag here too; otherwise a second histogram family would
      // also be formatted without its leading newline separator.
      is_first_static = false;

      const auto &family_data =
          label_sets.histogram_data[histogram_family_idx++];
      for (const auto &data : family_data) {
        plan->instructions.push_back(
            AggregateHistogram{data.thread_states, data.global_state,
                               data.bucket_count, family->buckets});

        // Static text for explicit buckets
        for (size_t i = 0; i < data.bucket_count; ++i) {
          auto bucket_value = static_format(temp_arena, family->buckets[i]);
          auto labels = append_label_to_format(
              data.labels_key.prometheus_format, "le", bucket_value);
          auto static_text =
              format(temp_arena, "%.*s\n%.*s_bucket%.*s ",
                     static_cast<int>(help_line.size()), help_line.data(),
                     static_cast<int>(name.length()), name.data(),
                     static_cast<int>(labels.length()), labels.data());
          plan->static_text.push_back(intern_static_text(static_text));
          help_line = "";
        }

        // Static text for the +Inf bucket
        auto inf_labels = append_label_to_format(
            data.labels_key.prometheus_format, "le", "+Inf");
        auto inf_static_text =
            format(temp_arena, "\n%.*s_bucket%.*s ",
                   static_cast<int>(name.length()), name.data(),
                   static_cast<int>(inf_labels.length()), inf_labels.data());
        plan->static_text.push_back(intern_static_text(inf_static_text));

        // Static text for the sum
        auto sum_static_text =
            format(temp_arena, "\n%.*s_sum%.*s ",
                   static_cast<int>(name.length()), name.data(),
                   static_cast<int>(data.labels_key.prometheus_format.length()),
                   data.labels_key.prometheus_format.data());
        plan->static_text.push_back(intern_static_text(sum_static_text));

        // Static text for the count
        auto count_static_text =
            format(temp_arena, "\n%.*s_count%.*s ",
                   static_cast<int>(name.length()), name.data(),
                   static_cast<int>(data.labels_key.prometheus_format.length()),
                   data.labels_key.prometheus_format.data());
        plan->static_text.push_back(intern_static_text(count_static_text));
      }
    }

    return plan;
  }

  // Phase 2: Execute phase - run instructions and generate dynamic text
  static ArenaVector<std::string_view>
  execute_render_plan(Arena &arena,
                      const ArenaVector<RenderInstruction> &instructions) {
    ArenaVector<std::string_view> dynamic_text(&arena);

    for (const auto &instruction : instructions) {
      switch (instruction.type) {
      case InstructionType::CALL_COUNTER_CALLBACK: {
        double value = (*instruction.counter_callback.callback_ptr)();
        dynamic_text.push_back(static_format(arena, value));
        break;
      }
      case InstructionType::CALL_GAUGE_CALLBACK: {
        double value = (*instruction.gauge_callback.callback_ptr)();
        dynamic_text.push_back(static_format(arena, value));
        break;
      }
      case InstructionType::AGGREGATE_COUNTER: {
        double total_value = 0.0;
        // Sum thread-local values
        for (auto *state_ptr : instruction.aggregate_counter.thread_states) {
          double value;
          // NOTE: __atomic_load works on a plain double (not atomic<double>)
          __atomic_load(&state_ptr->value, &value, __ATOMIC_RELAXED);
          total_value += value;
        }
        // Add the global accumulated value
        if (instruction.aggregate_counter.global_state) {
          total_value += instruction.aggregate_counter.global_state->value;
        }
        dynamic_text.push_back(static_format(arena, total_value));
        break;
      }
      case InstructionType::AGGREGATE_GAUGE: {
        double value = std::bit_cast<double>(
            instruction.aggregate_gauge.instance_state->value.load(
                std::memory_order_relaxed));
        dynamic_text.push_back(static_format(arena, value));
        break;
      }
      case InstructionType::AGGREGATE_HISTOGRAM: {
        // Aggregate histogram data
        size_t bucket_count = instruction.aggregate_histogram.bucket_count;
        uint64_t *total_counts_data = arena.allocate<uint64_t>(bucket_count);
        std::memset(total_counts_data, 0, bucket_count * sizeof(uint64_t));
        std::span<uint64_t> total_counts(total_counts_data, bucket_count);
        double total_sum = 0.0;
        uint64_t total_observations = 0;

        // Sum thread-local values
        for (auto *instance : instruction.aggregate_histogram.thread_states) {
          uint64_t *counts_snapshot = arena.allocate<uint64_t>(bucket_count);
          double sum_snapshot;
          uint64_t observations_snapshot;

          {
            std::lock_guard lock(instance->mutex);
            for (size_t i = 0; i < instance->counts.size(); ++i) {
              counts_snapshot[i] = instance->counts[i];
            }
            sum_snapshot = instance->sum;
            observations_snapshot = instance->observations;
          }

          for (size_t i = 0; i < bucket_count; ++i) {
            total_counts[i] += counts_snapshot[i];
          }
          total_sum += sum_snapshot;
          total_observations += observations_snapshot;
        }

        // Add the global accumulated values
        if (instruction.aggregate_histogram.global_state) {
          auto *global_state = instruction.aggregate_histogram.global_state;
          for (size_t i = 0; i < global_state->counts.size(); ++i) {
            total_counts[i] += global_state->counts[i];
          }
          total_sum += global_state->sum;
          total_observations += global_state->observations;
        }

        // Format explicit bucket counts
        for (size_t i = 0; i < total_counts.size(); ++i) {
          dynamic_text.push_back(static_format(arena, total_counts[i]));
        }
        // Format the +Inf bucket (total observations)
        dynamic_text.push_back(static_format(arena, total_observations));
        // Format the sum
        dynamic_text.push_back(static_format(arena, total_sum));
        // Format the count
        dynamic_text.push_back(static_format(arena, total_observations));
        break;
      }
      }
    }

    return dynamic_text;
  }

  // Phase 3: Present phase - interleave static and dynamic text
  static ArenaVector<std::string_view>
  present_render_output(Arena &arena,
                        const ArenaVector<std::string_view> &static_text,
                        const ArenaVector<std::string_view> &dynamic_text) {
    ArenaVector<std::string_view> output(&arena);

    for (size_t i = 0; i < static_text.size(); ++i) {
      // Static text is interned in the global arena (application lifetime).
      // Safe to reference directly without copying since:
      // - global arena lifetime > caller arena lifetime >= output usage
      //   lifetime
      // - static text is truly static for a given registry state
      output.push_back(static_text[i]);

      // Add the corresponding dynamic text (already in the caller's arena)
      output.push_back(dynamic_text[i]);
    }
    // Trailing newline
    output.push_back("\n");

    return output;
  }

  // Build label sets once for reuse in both phases
  static LabelSets build_label_sets(Arena &arena) {
    LabelSets label_sets{arena};

    // Build counter data with pre-resolved pointers
    for (const auto &[name, family] : Metric::get_counter_families()) {
      // Collect all unique labels first
      std::set<LabelsKey, std::less<LabelsKey>, ArenaStlAllocator<LabelsKey>>
          all_labels{ArenaStlAllocator<LabelsKey>(&arena)};

      for (const auto &[thread_id, per_thread] : family->per_thread_state) {
        for (const auto &[labels_key, instance] : per_thread.instances) {
          all_labels.insert(labels_key);
        }
      }
      for (const auto &[labels_key, global_state] :
           family->global_accumulated_values) {
        if (global_state) {
          all_labels.insert(labels_key);
        }
      }

      // Pre-resolve all pointers for each label set
      ArenaVector<CounterLabelData> family_data{&arena};
      for (const auto &labels_key : all_labels) {
        CounterLabelData data{labels_key, arena};

        // Pre-resolve thread-local state pointers
        for (const auto &[thread_id, per_thread] : family->per_thread_state) {
          auto it = per_thread.instances.find(labels_key);
          if (it != per_thread.instances.end()) {
            data.thread_states.push_back(it->second);
          }
        }

        // Pre-resolve the global accumulated state pointer
        auto global_it = family->global_accumulated_values.find(labels_key);
        data.global_state =
            (global_it != family->global_accumulated_values.end() &&
             global_it->second)
                ? global_it->second
                : nullptr;

        family_data.push_back(std::move(data));
      }
      label_sets.counter_data.push_back(std::move(family_data));
    }

    // Build gauge data with pre-resolved pointers
    for (const auto &[name, family] : Metric::get_gauge_families()) {
      ArenaVector<GaugeLabelData> family_data{&arena};

      // Gauges iterate directly over instances
      for (const auto &[labels_key, instance] : family->instances) {
        GaugeLabelData data(labels_key);
        data.instance_state = instance;
        family_data.push_back(std::move(data));
      }

      label_sets.gauge_data.push_back(std::move(family_data));
    }

    // Build histogram data with pre-resolved pointers
    for (const auto &[name, family] : Metric::get_histogram_families()) {
      // Collect all unique labels first
      std::set<LabelsKey, std::less<LabelsKey>, ArenaStlAllocator<LabelsKey>>
          all_labels{ArenaStlAllocator<LabelsKey>(&arena)};

      for (const auto &[thread_id, per_thread] : family->per_thread_state) {
        for (const auto &[labels_key, instance] : per_thread.instances) {
          all_labels.insert(labels_key);
        }
      }
      for (const auto &[labels_key, global_state] :
           family->global_accumulated_values) {
        if (global_state) {
          all_labels.insert(labels_key);
        }
      }

      // Pre-resolve all pointers for each label set
      ArenaVector<HistogramLabelData> family_data{&arena};
      for (const auto &labels_key : all_labels) {
        HistogramLabelData data(labels_key, arena);
        data.bucket_count = family->buckets.size(); // Cache the bucket count

        // Pre-resolve thread-local state pointers
        for (const auto &[thread_id, per_thread] : family->per_thread_state) {
          auto it = per_thread.instances.find(labels_key);
          if (it != per_thread.instances.end()) {
            data.thread_states.push_back(it->second);
          }
        }

        // Pre-resolve the global accumulated state pointer
        auto global_it = family->global_accumulated_values.find(labels_key);
        data.global_state =
            (global_it != family->global_accumulated_values.end() &&
             global_it->second)
                ? global_it->second
                : nullptr;

        family_data.push_back(std::move(data));
      }
      label_sets.histogram_data.push_back(std::move(family_data));
    }

    return label_sets;
  }
};

Counter::Counter() = default;

void Counter::inc(double x) {
  // THREAD-SAFETY: This method mixes a non-atomic read and an atomic store,
  // which is safe ONLY under the following conditions, all of which this
  // system meets:
  //
  // 1. Single-Writer Guarantee: The underlying Counter::State is thread-local.
  //    Only one thread will ever call inc() on a given instance. All writes
  //    and the non-atomic read below are therefore *sequenced*, not
  //    concurrent, preventing torn reads within this thread.
  //
  // 2. Atomic Visibility: The render thread is the only other thread that
  //    accesses this value, and it does so via an atomic load. A concurrent
  //    non-atomic read (writer) and atomic read (renderer) is not a data race.
  //
  // 3. Overflow Check Safety: Both non-atomic reads of p->value are sequenced
  //    in the same thread, so no races occur.
  //
  // This contrasts with Gauges, whose state can be shared by multiple threads
  // and thus requires a fully atomic read-modify-write cycle (CAS loop).
  auto new_value = p->value + x;

  // Validate the monotonic property (a counter never decreases).
  // Safe: both reads of p->value are sequenced in this thread.
  if (new_value < p->value) [[unlikely]] {
    validate_or_abort(false, "counter value overflow/wraparound detected",
                      std::to_string(new_value));
  }

  __atomic_store(&p->value, &new_value, __ATOMIC_RELAXED);
}

Gauge::Gauge() = default;

void Gauge::inc(double x) {
  // Lock-free increment using a CAS loop
  uint64_t expected = p->value.load(std::memory_order_relaxed);
  uint64_t desired;
  do {
    double current_value = std::bit_cast<double>(expected);
    double new_value = current_value + x;
    desired = std::bit_cast<uint64_t>(new_value);
  } while (!p->value.compare_exchange_weak(expected, desired,
                                           std::memory_order_relaxed));
}

void Gauge::dec(double x) {
  // Lock-free decrement using a CAS loop
  uint64_t expected = p->value.load(std::memory_order_relaxed);
  uint64_t desired;
  do {
    double current_value = std::bit_cast<double>(expected);
    double new_value = current_value - x;
    desired = std::bit_cast<uint64_t>(new_value);
  } while (!p->value.compare_exchange_weak(expected, desired,
                                           std::memory_order_relaxed));
}

void Gauge::set(double x) {
  // Simple atomic store for the set operation
  p->value.store(std::bit_cast<uint64_t>(x), std::memory_order_relaxed);
}

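// Usage sketch (hypothetical gauge; any thread may update it concurrently):
//
//   Family<Gauge> depth = create_gauge("queue_depth", "Current queue depth");
//   Gauge g = depth.create({});
//   g.set(10.0); // atomic store
//   g.inc(2.0);  // CAS loop: 10 -> 12
//   g.dec(3.0);  // CAS loop: 12 -> 9
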
Histogram::Histogram() = default;

// Vectorized histogram bucket updates with mutex protection for consistency.
// AVX-targeted implementation for high performance.

__attribute__((target("avx"))) static void
update_histogram_buckets_simd(std::span<const double> thresholds,
                              std::span<uint64_t> counts, double x,
                              size_t start_idx) {
  const size_t size = thresholds.size();
  size_t i = start_idx;

  // Process 2 buckets at a time with 128-bit vectors
  const __m128d x_vec = _mm_set1_pd(x);

  for (; i + 2 <= size; i += 2) {
    // 128-bit vectorized comparison and arithmetic
    __m128d thresholds_vec = _mm_loadu_pd(&thresholds[i]);
    __m128d cmp_result = _mm_cmp_pd(x_vec, thresholds_vec, _CMP_LE_OQ);
    __m128i cmp_as_int = _mm_castpd_si128(cmp_result);
    __m128i ones = _mm_set1_epi64x(1);
    // NOTE: _mm_cmp_pd returns all-ones (0xFFFFFFFFFFFFFFFF) per lane for
    // true and all-zeros for false. ANDing with ones extracts the low bit,
    // converting each 64-bit lane to a 1 or 0 increment.
    __m128i increments = _mm_and_si128(cmp_as_int, ones);

    // Load current counts and add the increments.
    // NOTE: memcpy is used instead of aligned vector loads because uint64_t
    // arrays from the arena allocator are not guaranteed 16-byte aligned.
    __m128i current_counts;
    std::memcpy(&current_counts, &counts[i], sizeof(__m128i));
    __m128i updated_counts = _mm_add_epi64(current_counts, increments);

    // Store the updated counts
    std::memcpy(&counts[i], &updated_counts, sizeof(__m128i));
  }

  // Handle the remainder with scalar operations
  for (; i < size; ++i) {
    if (x <= thresholds[i]) {
      counts[i]++;
    }
  }
}

void Histogram::observe(double x) {
  assert(p->thresholds.size() == p->counts.size());

  std::lock_guard lock(p->mutex);

  // Update bucket counts using SIMD
  update_histogram_buckets_simd(p->thresholds, p->counts, x, 0);

  // Update the sum and observation count
  p->sum += x;
  p->observations++;
}

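// Cumulative-bucket example (illustrative): with thresholds {0.1, 1, 10},
// observe(0.5) increments the counts for le="1" and le="10" (since 0.5 <= 1
// and 0.5 <= 10), plus sum and the total observation count - which doubles
// as the le="+Inf" bucket at render time.
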
template <> Family<Counter>::Family() = default;
template <> Family<Gauge>::Family() = default;
template <> Family<Histogram>::Family() = default;

template <>
Counter Family<Counter>::create(
    std::span<const std::pair<std::string_view, std::string_view>> labels) {
  return Metric::create_counter_instance(this, labels);
}

template <>
Gauge Family<Gauge>::create(
    std::span<const std::pair<std::string_view, std::string_view>> labels) {
  return Metric::create_gauge_instance(this, labels);
}

template <>
Histogram Family<Histogram>::create(
    std::span<const std::pair<std::string_view, std::string_view>> labels) {
  return Metric::create_histogram_instance(this, labels);
}

Family<Counter> create_counter(std::string_view name, std::string_view help) {
  validate_or_abort(is_valid_metric_name(name), "invalid counter name", name);

  std::unique_lock _{Metric::mutex};
  ++Metric::registration_version;
  auto &global_arena = Metric::get_global_arena();
  auto name_view = global_arena.copy_string(name);
  auto &familyPtr = Metric::get_counter_families()[name_view];
  if (!familyPtr) {
    familyPtr = global_arena.construct<Family<Counter>::State>(global_arena);
    familyPtr->name = name_view;
    familyPtr->help = global_arena.copy_string(help);
  } else {
    validate_or_abort(
        familyPtr->help == help,
        "metric family already registered with different help text", name);
  }
  Family<Counter> family;
  family.p = familyPtr.get();
  return family;
}

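// Usage sketch (hypothetical metric name; the span parameter of create() is
// bound to a local array of key/value pairs):
//
//   static Family<Counter> requests =
//       create_counter("weasel_requests_total", "Total requests served");
//   std::pair<std::string_view, std::string_view> labels[] = {
//       {"status", "ok"}};
//   Counter ok = requests.create(labels); // per-thread instance
//   ok.inc();
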
Family<Gauge> create_gauge(std::string_view name, std::string_view help) {
  validate_or_abort(is_valid_metric_name(name), "invalid gauge name", name);

  std::unique_lock _{Metric::mutex};
  ++Metric::registration_version;
  auto &global_arena = Metric::get_global_arena();
  auto name_view = global_arena.copy_string(name);
  auto &familyPtr = Metric::get_gauge_families()[name_view];
  if (!familyPtr) {
    // Family<T>::State instances use Arena::Ptr for automatic cleanup
    familyPtr = global_arena.construct<Family<Gauge>::State>(global_arena);
    familyPtr->name = name_view;
    familyPtr->help = global_arena.copy_string(help);
  } else {
    validate_or_abort(
        familyPtr->help == help,
        "metric family already registered with different help text", name);
  }
  Family<Gauge> family;
  family.p = familyPtr.get();
  return family;
}

Family<Histogram> create_histogram(std::string_view name, std::string_view help,
                                   std::span<const double> buckets) {
  validate_or_abort(is_valid_metric_name(name), "invalid histogram name", name);

  std::unique_lock _{Metric::mutex};
  ++Metric::registration_version;
  auto &global_arena = Metric::get_global_arena();
  auto name_view = global_arena.copy_string(name);
  auto &family_ptr = Metric::get_histogram_families()[name_view];
  if (!family_ptr) {
    // Family<T>::State instances use Arena::Ptr for automatic cleanup
    family_ptr = global_arena.construct<Family<Histogram>::State>(global_arena);
    family_ptr->name = name_view;
    family_ptr->help = global_arena.copy_string(help);

    // DESIGN: Prometheus-compatible histogram buckets.
    // Convert to a vector for sorting and deduplication.
    std::vector<double> temp_buckets(buckets.begin(), buckets.end());
    std::sort(temp_buckets.begin(), temp_buckets.end());
    temp_buckets.erase(std::unique(temp_buckets.begin(), temp_buckets.end()),
                       temp_buckets.end());

    // Copy the sorted buckets into the arena vector
    for (double bucket : temp_buckets) {
      family_ptr->buckets.push_back(bucket);
    }
    // Note: the +Inf bucket is not stored explicitly - the total observation
    // count is used instead
  } else {
    validate_or_abort(
        family_ptr->help == help,
        "metric family already registered with different help text", name);
    std::vector<double> new_buckets_vec(buckets.begin(), buckets.end());
    std::sort(new_buckets_vec.begin(), new_buckets_vec.end());
    new_buckets_vec.erase(
        std::unique(new_buckets_vec.begin(), new_buckets_vec.end()),
        new_buckets_vec.end());
    // Note: the +Inf bucket is not stored explicitly - the total observation
    // count is used instead

    // Compare with the existing buckets
    bool buckets_match = (family_ptr->buckets.size() == new_buckets_vec.size());
    if (buckets_match) {
      for (size_t i = 0; i < family_ptr->buckets.size(); ++i) {
        if (family_ptr->buckets[i] != new_buckets_vec[i]) {
          buckets_match = false;
          break;
        }
      }
    }
    validate_or_abort(buckets_match,
                      "metric family already registered with different buckets",
                      name);
  }
  Family<Histogram> family;
  family.p = family_ptr.get();
  return family;
}

std::vector<double> linear_buckets(double start, double width, int count) {
  validate_or_abort(width > 0, "linear bucket width must be positive",
                    std::to_string(width));
  validate_or_abort(count >= 0, "linear bucket count must be non-negative",
                    std::to_string(count));

  std::vector<double> buckets;
  buckets.reserve(count);

  for (int i = 0; i < count; ++i) {
    buckets.push_back(start + i * width);
  }

  return buckets;
}

std::vector<double> exponential_buckets(double start, double factor,
|
|
int count) {
|
|
validate_or_abort(start > 0, "exponential bucket start must be positive",
|
|
std::to_string(start));
|
|
validate_or_abort(factor > 1, "exponential bucket factor must be > 1",
|
|
std::to_string(factor));
|
|
validate_or_abort(count >= 0, "exponential bucket count must be non-negative",
|
|
std::to_string(count));
|
|
|
|
std::vector<double> buckets;
|
|
buckets.reserve(count);
|
|
|
|
double current = start;
|
|
for (int i = 0; i < count; ++i) {
|
|
buckets.push_back(current);
|
|
current *= factor;
|
|
}
|
|
|
|
return buckets;
|
|
}
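
// Worked example: bound i is start * factor^i, so
//
//   exponential_buckets(1.0, 2.0, 6)  =>  {1, 2, 4, 8, 16, 32}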

// Prometheus validation functions
// Metric names must match [a-zA-Z_:][a-zA-Z0-9_:]*
bool is_valid_metric_name(std::string_view name) {
  if (name.empty())
    return false;

  // First character must be letter, underscore, or colon.
  // Cast to unsigned char: passing a negative char to std::isalpha/isalnum
  // is undefined behavior.
  char first = name[0];
  if (!std::isalpha(static_cast<unsigned char>(first)) && first != '_' &&
      first != ':') {
    return false;
  }

  // Remaining characters must be alphanumeric, underscore, or colon
  for (size_t i = 1; i < name.size(); ++i) {
    char c = name[i];
    if (!std::isalnum(static_cast<unsigned char>(c)) && c != '_' && c != ':') {
      return false;
    }
  }

  return true;
}
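
// Examples:
//
//   is_valid_metric_name("http_requests_total")   -> true
//   is_valid_metric_name("weaseldb:txn_commits")  -> true  (colons allowed)
//   is_valid_metric_name("2fast")                 -> false (leading digit)
//   is_valid_metric_name("")                      -> false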

// Label keys must match [a-zA-Z_][a-zA-Z0-9_]*
bool is_valid_label_key(std::string_view key) {
  if (key.empty())
    return false;

  // First character must be letter or underscore (cast to unsigned char to
  // avoid undefined behavior for negative char values)
  char first = key[0];
  if (!std::isalpha(static_cast<unsigned char>(first)) && first != '_') {
    return false;
  }

  // Remaining characters must be alphanumeric or underscore
  for (size_t i = 1; i < key.size(); ++i) {
    char c = key[i];
    if (!std::isalnum(static_cast<unsigned char>(c)) && c != '_') {
      return false;
    }
  }

  // Label keys starting with __ are reserved for internal use
  if (key.size() >= 2 && key[0] == '_' && key[1] == '_') {
    return false;
  }

  return true;
}
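
// Examples:
//
//   is_valid_label_key("status_code") -> true
//   is_valid_label_key("__internal")  -> false (double underscore is reserved)
//   is_valid_label_key("1xx")         -> false (leading digit)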

// Label values can contain any UTF-8 characters (no specific restrictions)
bool is_valid_label_value(std::string_view value) {
  // Prometheus allows any UTF-8 string as label value.
  // Validate UTF-8 encoding for correctness using simdutf.
  return simdutf::validate_utf8(value.data(), value.size());
}
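
// Examples (the second string contains bytes that can never appear in
// well-formed UTF-8):
//
//   is_valid_label_value("héllo")     -> true
//   is_valid_label_value("\xff\xfe")  -> false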

// 64-bit metric payload viewed either as a double or as its raw bits (values
// are stored as uint64_t via std::bit_cast; see the precision strategy notes
// at the top of this file).
union MetricValue {
  double as_double;
  uint64_t as_uint64;
};

// Memory usage calculation callback for metrics system self-monitoring
static double calculate_metrics_memory_usage() {
  std::size_t total_bytes = 0;

  // 1. Global arena memory
  total_bytes += Metric::get_global_arena().total_allocated();

  // 2. Per-thread arenas (safe because we're already holding the global mutex)
  for (const auto &[thread_id, arena_ptr] : Metric::get_thread_arenas()) {
    total_bytes += arena_ptr->total_allocated();
  }

  // 3. Cached plan arena
  if (Metric::cached_plan) {
    total_bytes += Metric::cached_plan->arena.total_allocated();
  }

  return static_cast<double>(total_bytes);
}

// New three-phase render implementation
std::span<std::string_view> render(Arena &arena) {
  // Initialize self-monitoring metrics (before taking global lock)
  static auto memory_gauge = []() {
    auto gauge = create_gauge("weaseldb_metrics_memory_bytes",
                              "Total memory usage of the metrics system "
                              "including global and per-thread arenas");
    gauge.register_callback({}, calculate_metrics_memory_usage);
    return gauge;
  }();
  (void)memory_gauge; // Suppress unused variable warning

  // Hold lock throughout all phases to prevent registry changes.
  // THREAD SAFETY: Global mutex protects cached_plan initialization and
  // access, prevents races during static member initialization at program
  // startup
  std::unique_lock _{Metric::mutex};

  // Call all registered collectors to update their metrics
  for (const auto &collector : Metric::get_collectors()) {
    collector->collect();
  }

  // Phase 1: Compile - generate static text and instructions
  // Safe: cached_plan access/initialization protected by mutex above
  if (!Metric::cached_plan || Metric::cached_plan->registration_version !=
                                  Metric::registration_version) {
    Metric::cached_plan = Metric::compile_render_plan();
  }

  // Phase 2: Execute - run instructions and generate dynamic text
  ArenaVector<std::string_view> dynamic_text =
      Metric::execute_render_plan(arena, Metric::cached_plan->instructions);

  // Phase 3: Present - interleave static and dynamic text
  ArenaVector<std::string_view> output = Metric::present_render_output(
      arena, Metric::cached_plan->static_text, dynamic_text);

  return output;
}
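
// Usage sketch (illustrative; `response` is a hypothetical sink). The caller
// owns the arena, and the returned string_views point into it, so the arena
// must outlive any use of the output:
//
//   Arena arena(64 * 1024);
//   for (std::string_view chunk : render(arena)) {
//     response.write(chunk.data(), chunk.size());
//   }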

// Template specialization implementations for register_callback
template <>
void Family<Counter>::register_callback(
    std::span<const std::pair<std::string_view, std::string_view>> labels,
    MetricCallback<Counter> callback) {
  std::unique_lock _{Metric::mutex};
  ++Metric::registration_version;
  const LabelsKey &key = Metric::intern_labels(labels);

  // Validate that labels aren't already in use by create() calls
  for (const auto &[thread_id, per_thread] : p->per_thread_state) {
    validate_or_abort(
        per_thread.instances.find(key) == per_thread.instances.end(),
        "labels already registered as static instance",
        key.prometheus_format.empty() ? "(no labels)" : key.prometheus_format);
  }

  // Validate that callback isn't already registered for these labels
  validate_or_abort(p->callbacks.find(key) == p->callbacks.end(),
                    "callback already registered for labels",
                    key.prometheus_format.empty() ? "(no labels)"
                                                  : key.prometheus_format);

  // `key` is a const reference to the interned instance, so it is copied into
  // the map; std::move on a const lvalue would be a silent no-op here.
  p->callbacks[key] = std::move(callback);
}

template <>
void Family<Gauge>::register_callback(
    std::span<const std::pair<std::string_view, std::string_view>> labels,
    MetricCallback<Gauge> callback) {
  std::unique_lock _{Metric::mutex};
  ++Metric::registration_version;
  const LabelsKey &key = Metric::intern_labels(labels);

  // Validate that labels aren't already in use by create() calls
  validate_or_abort(p->instances.find(key) == p->instances.end(),
                    "labels already registered as static instance",
                    key.prometheus_format.empty() ? "(no labels)"
                                                  : key.prometheus_format);

  // Validate that callback isn't already registered for these labels
  validate_or_abort(p->callbacks.find(key) == p->callbacks.end(),
                    "callback already registered for labels",
                    key.prometheus_format.empty() ? "(no labels)"
                                                  : key.prometheus_format);

  // As above, `key` is const and interned; copy it rather than faking a move.
  p->callbacks[key] = std::move(callback);
}
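
// Usage sketch (illustrative; the metric name, label value, and open_count()
// are hypothetical, and MetricCallback<Gauge> is assumed to accept any
// callable returning double):
//
//   static auto conn_gauge = create_gauge(
//       "weaseldb_open_connections", "Currently open client connections");
//   constexpr std::pair<std::string_view, std::string_view> labels[] = {
//       {"listener", "public"}};
//   conn_gauge.register_callback(labels,
//                                [] { return double(open_count()); });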

// Static member definitions
std::mutex Metric::mutex;
uint64_t Metric::registration_version;
std::unique_ptr<Metric::RenderPlan> Metric::cached_plan;
thread_local Metric::ThreadInit Metric::thread_init;

void reset_metrics_for_testing() {
  std::lock_guard _{Metric::mutex};
  ++Metric::registration_version;

  Metric::get_counter_families().clear();
  Metric::get_gauge_families().clear();
  Metric::get_histogram_families().clear();
  Metric::get_interned_labels().clear();
  Metric::get_interned_static_text().clear();

  Metric::get_global_arena().reset();

  // Re-seat each arena-backed container on the freshly reset global arena
  auto reset_arena = [](auto &m) {
    using M = std::decay_t<decltype(m)>;
    using A = M::allocator_type;
    m = M{A{&Metric::get_global_arena()}};
  };

  reset_arena(Metric::get_counter_families());
  reset_arena(Metric::get_gauge_families());
  reset_arena(Metric::get_histogram_families());
  reset_arena(Metric::get_interned_labels());
  reset_arena(Metric::get_interned_static_text());

  // Note: Thread-local arenas will be cleaned up by ThreadInit destructors
  // when threads exit naturally
}
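
// Usage sketch (illustrative; the test harness and test names are
// hypothetical):
//
//   TEST(Metrics, FreshRegistry) {
//     reset_metrics_for_testing();
//     // ... register families and assert on render() output ...
//   }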

void register_collector(Ref<Collector> collector) {
  std::unique_lock _{Metric::mutex};
  ++Metric::registration_version;
  Metric::get_collectors().push_back(std::move(collector));
}
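
// Usage sketch (illustrative; QueueStatsCollector and make_ref() are
// hypothetical - the grounded contract is only that render() invokes
// collect() on every registered collector under the global mutex):
//
//   struct QueueStatsCollector : Collector {
//     void collect() override {
//       // refresh gauges/counters from live queue state here
//     }
//   };
//   register_collector(make_ref<QueueStatsCollector>());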

} // namespace metric