Use int64_t internally for metrics
Some checks failed
Tests / Clang total: 1499, passed: 1499
Clang

|Total|New|Outstanding|Fixed|Trend|
|:-:|:-:|:-:|:-:|:-:|
|0|0|0|0|:clap:|
Tests / Debug total: 1497, passed: 1497
Tests / SIMD fallback total: 1499, passed: 1499
Tests / Release [gcc] total: 1499, passed: 1499
GNU C Compiler (gcc)

|Total|New|Outstanding|Fixed|Trend|
|:-:|:-:|:-:|:-:|:-:|
|0|0|0|0|:clap:|
Tests / Release [gcc,aarch64] total: 1117, failed: 1, passed: 1116
Tests / Coverage total: 1126, passed: 1126
weaselab/conflict-set/pipeline/head There was a failure building this commit

So we can use fetch_add
This commit is contained in:
2024-07-31 14:41:42 -07:00
parent 6da3125719
commit fce998460f

View File

@@ -18,6 +18,7 @@ limitations under the License.
#include "Internal.h"
#include <algorithm>
#include <atomic>
#include <bit>
#include <cassert>
#include <compare>
@@ -552,7 +553,7 @@ struct Metric {
const char *name;
const char *help;
ConflictSet::MetricsV1::Type type;
std::atomic<double> value;
std::atomic<int64_t> value;
protected:
Metric(ConflictSet::Impl *impl, const char *name, const char *help,
@@ -563,7 +564,7 @@ struct Gauge : private Metric {
Gauge(ConflictSet::Impl *impl, const char *name, const char *help)
: Metric(impl, name, help, ConflictSet::MetricsV1::Gauge) {}
void set(double value) {
void set(int64_t value) {
this->value.store(value, std::memory_order_relaxed);
}
};
@@ -573,17 +574,10 @@ struct Counter : private Metric {
: Metric(impl, name, help, ConflictSet::MetricsV1::Counter) {}
// Expensive. Accumulate locally and then call add instead of repeatedly
// calling add.
void add(double value) {
void add(int64_t value) {
assert(value >= 0);
static_assert(std::atomic<double>::is_always_lock_free);
double old = this->value.load(std::memory_order_relaxed);
for (;;) {
double newVal = old + value;
if (this->value.compare_exchange_weak(old, newVal,
std::memory_order_relaxed)) {
break;
}
}
static_assert(std::atomic<int64_t>::is_always_lock_free);
this->value.fetch_add(value, std::memory_order_relaxed);
}
};
@@ -708,29 +702,29 @@ size_t Node::size() const {
// A type that's plumbed along the check call tree. Lifetime ends after each
// check call.
struct ReadContext {
double point_read_accum = 0;
double prefix_read_accum = 0;
double range_read_accum = 0;
double point_read_short_circuit_accum = 0;
double prefix_read_short_circuit_accum = 0;
double range_read_short_circuit_accum = 0;
double point_read_iterations_accum = 0;
double prefix_read_iterations_accum = 0;
double range_read_iterations_accum = 0;
double range_read_node_scan_accum = 0;
int64_t point_read_accum = 0;
int64_t prefix_read_accum = 0;
int64_t range_read_accum = 0;
int64_t point_read_short_circuit_accum = 0;
int64_t prefix_read_short_circuit_accum = 0;
int64_t range_read_short_circuit_accum = 0;
int64_t point_read_iterations_accum = 0;
int64_t prefix_read_iterations_accum = 0;
int64_t range_read_iterations_accum = 0;
int64_t range_read_node_scan_accum = 0;
ConflictSet::Impl *impl;
};
// A type that's plumbed along the non-const call tree. Same lifetime as
// ConflictSet::Impl
struct WriteContext {
double entries_erased_accum = 0;
double insert_iterations_accum = 0;
double entries_inserted_accum = 0;
double nodes_allocated_accum = 0;
double nodes_released_accum = 0;
double point_writes_accum = 0;
double range_writes_accum = 0;
int64_t entries_erased_accum = 0;
int64_t insert_iterations_accum = 0;
int64_t entries_inserted_accum = 0;
int64_t nodes_allocated_accum = 0;
int64_t nodes_released_accum = 0;
int64_t point_writes_accum = 0;
int64_t range_writes_accum = 0;
template <class T> T *allocate(int c) {
++nodes_allocated_accum;
if constexpr (std::is_same_v<T, Node0>) {
@@ -3122,21 +3116,15 @@ Node *firstGeqPhysical(Node *n, const std::span<const uint8_t> key) {
}
}
#define MEASURE_CHECK_CPU_TIME 0
struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
void check(const ReadRange *reads, Result *result, int count) {
#if MEASURE_CHECK_CPU_TIME
timespec ts_begin;
clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts_begin);
#endif
ReadContext tls;
tls.impl = this;
int commits_accum = 0;
int conflicts_accum = 0;
int too_olds_accum = 0;
double check_byte_accum = 0;
int64_t check_byte_accum = 0;
for (int i = 0; i < count; ++i) {
const auto &r = reads[i];
check_byte_accum += r.begin.len + r.end.len;
@@ -3171,13 +3159,6 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
conflicts_total.add(conflicts_accum);
too_olds_total.add(too_olds_accum);
check_bytes_total.add(check_byte_accum);
#if MEASURE_CHECK_CPU_TIME
timespec ts_end;
clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts_end);
check_cpu_seconds_total.add(
std::max<double>(0, (ts_end.tv_nsec * 1e-9 + ts_end.tv_sec) -
(ts_begin.tv_nsec * 1e-9 + ts_begin.tv_sec)));
#endif
}
void addWrites(const WriteRange *writes, int count, int64_t writeVersion) {
@@ -3203,7 +3184,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
gcScanStep(1000);
}
double write_byte_accum = 0;
int64_t write_byte_accum = 0;
for (int i = 0; i < count; ++i) {
const auto &w = writes[i];
write_byte_accum += w.begin.len + w.end.len;
@@ -3242,7 +3223,7 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
rootMaxVersion = std::max(rootMaxVersion, oldestVersion);
n = nextPhysical(n);
}
double set_oldest_iterations_accum = 0;
int64_t set_oldest_iterations_accum = 0;
for (; fuel > 0 && n != nullptr; ++set_oldest_iterations_accum) {
rezero(n, oldestVersion);
// The "make sure gc keeps up with writes" calculations assume that we're
@@ -3443,10 +3424,6 @@ struct __attribute__((visibility("hidden"))) ConflictSet::Impl {
GAUGE(
oldest_extant_version,
"A lower bound on the lowest version associated with an existing entry");
#if MEASURE_CHECK_CPU_TIME
COUNTER(check_cpu_seconds_total,
"Total cpu seconds spent in a call to check");
#endif
// ==================== END METRICS DEFINITIONS ====================
#undef GAUGE
#undef COUNTER