Compare commits

...

18 Commits

Author SHA1 Message Date
0561d951d4 Finish std::shared_ptr -> Ref migration 2025-09-11 15:06:04 -04:00
a2da7fba84 Explicitly support having a WeakRef to self 2025-09-11 14:54:42 -04:00
5d932bf36c Add polymorphism support to Ref 2025-09-11 14:15:52 -04:00
9a8d4feedd Add documentation 2025-09-11 13:54:00 -04:00
9cd83fc426 Call ~ControlBlock
It's trivially destructible, but just in case. Compiler should optimize it out
2025-09-11 13:18:19 -04:00
10e382f633 Used biased weak count, cache T* pointer
Logically, the strong pointer that destroys T owns +1 weak count too
2025-09-11 13:15:03 -04:00
f83e21b5a0 Defeat shared_ptr's single-threaded optimizations
WeaselDB is always going to start multiple threads, so we don't care
about single-threaded performance
2025-09-11 13:13:05 -04:00
5adbf8eee2 Organize bench_reference.cpp with doctest 2025-09-11 12:32:25 -04:00
2bc17cbfe6 Add bench_reference.cpp
Also update snake case script for nanobench symbols
2025-09-11 12:22:56 -04:00
89c5a2f165 Strengthen language instructing reading the style guide 2025-09-11 12:02:44 -04:00
d35a4fa4db Update multi-threaded tests/benchmarks guidance 2025-09-11 12:01:18 -04:00
994e31032f Fix data race in freeing control block 2025-09-11 11:32:59 -04:00
0f179eed88 Switch to two separate atomic counters
It's faster and still correct. I was confused remembering something
about atomic shared pointer ideas before.
2025-09-11 10:53:25 -04:00
b9106a0d3c Add test_reference.cpp 2025-09-10 22:05:31 -04:00
6aaca4c171 Finish reference.hpp 2025-09-10 21:58:08 -04:00
7c4d928807 Start on Ref/WeakRef 2025-09-10 20:04:32 -04:00
5d289ddd42 Add metric for write EAGAIN failures 2025-09-10 16:48:27 -04:00
962a010724 Simplify process_connection_writes condition
And comment explaining that there's something more precise but more
complex available.
2025-09-10 16:45:04 -04:00
15 changed files with 1541 additions and 54 deletions

View File

@@ -268,3 +268,16 @@ add_executable(test_api_url_parser tests/test_api_url_parser.cpp)
target_link_libraries(test_api_url_parser doctest_impl weaseldb_sources_debug)
target_compile_options(test_api_url_parser PRIVATE -UNDEBUG)
add_test(NAME api_url_parser_tests COMMAND test_api_url_parser)
# Reference counting tests and benchmarks
# test_reference links only doctest: reference.hpp is header-only, so no
# weaseldb sources are required.
add_executable(test_reference tests/test_reference.cpp)
target_link_libraries(test_reference doctest_impl)
target_include_directories(test_reference PRIVATE src)
# Keep assert() active regardless of build type, matching the tests above.
target_compile_options(test_reference PRIVATE -UNDEBUG)
add_test(NAME reference_tests COMMAND test_reference)
# bench_reference spawns std::thread workers for its contention benchmarks,
# hence the Threads::Threads dependency.
add_executable(bench_reference benchmarks/bench_reference.cpp)
target_link_libraries(bench_reference doctest_impl nanobench_impl
Threads::Threads)
target_include_directories(bench_reference PRIVATE src)
add_test(NAME reference_benchmarks COMMAND bench_reference)

View File

@@ -0,0 +1,400 @@
#include <memory>
#include <thread>
#include <vector>
#include <doctest/doctest.h>
#include <nanobench.h>
#include "reference.hpp"
namespace {
// Minimal payload for the benchmarks. Defaults to 42 so default-constructed
// instances are distinguishable from value-constructed ones.
struct TestObject {
  int64_t data{42};
  explicit TestObject(int64_t value) : data{value} {}
  TestObject() = default;
};
// Trait helpers for templated benchmarks
// Maps each smart-pointer family to its weak counterpart, its factory
// function, and human-readable labels, so every benchmark below can be
// written once and instantiated for both families.
template <typename T> struct PointerTraits;

// std::shared_ptr / std::weak_ptr flavor.
template <typename T> struct PointerTraits<std::shared_ptr<T>> {
  using pointer_type = std::shared_ptr<T>;
  using weak_type = std::weak_ptr<T>;
  static const char *name() { return "std::shared_ptr"; }
  static const char *weak_name() { return "std::weak_ptr"; }
  template <typename... A> static pointer_type make(A &&...a) {
    return std::make_shared<T>(std::forward<A>(a)...);
  }
};

// Ref / WeakRef flavor.
template <typename T> struct PointerTraits<Ref<T>> {
  using pointer_type = Ref<T>;
  using weak_type = WeakRef<T>;
  static const char *name() { return "Ref"; }
  static const char *weak_name() { return "WeakRef"; }
  template <typename... A> static pointer_type make(A &&...a) {
    return make_ref<T>(std::forward<A>(a)...);
  }
};
// Force multi-threaded mode to defeat __libc_single_threaded optimization
// Spawn and immediately join a thread so the runtime flips into
// multi-threaded mode. This defeats libstdc++'s __libc_single_threaded fast
// path, which would otherwise skip real atomic refcounting and skew results.
void force_multithreaded() { std::thread{[] {}}.join(); }
// Measure the cost of allocating and constructing one refcounted TestObject.
template <typename PtrType>
void benchmark_creation(ankerl::nanobench::Bench &bench) {
  using Traits = PointerTraits<PtrType>;
  force_multithreaded(); // ensure real atomic refcounting paths are measured
  const std::string label = std::string(Traits::name()) + " creation";
  bench.run(label, [&] {
    auto ptr = Traits::make(TestObject{123});
    ankerl::nanobench::doNotOptimizeAway(ptr);
  });
}
// Measure the cost of copying a strong reference (the copy is destroyed at
// the end of each iteration).
template <typename PtrType>
void benchmark_copy(ankerl::nanobench::Bench &bench) {
  using Traits = PointerTraits<PtrType>;
  force_multithreaded(); // ensure real atomic refcounting paths are measured
  auto original = Traits::make(TestObject{123});
  const std::string label = std::string(Traits::name()) + " copy";
  bench.run(label, [&] {
    auto copy = original;
    ankerl::nanobench::doNotOptimizeAway(copy);
  });
}
// Measure move cost. Each iteration moves the pointer out and back, so the
// fixture is valid again for the next iteration. Moves touch no refcounts,
// so force_multithreaded() is not needed here.
template <typename PtrType>
void benchmark_move(ankerl::nanobench::Bench &bench) {
  using Traits = PointerTraits<PtrType>;
  auto original = Traits::make(TestObject{123});
  const std::string label = std::string(Traits::name()) + " move";
  bench.run(label, [&] {
    auto moved = std::move(original);
    ankerl::nanobench::doNotOptimizeAway(moved);
    original = std::move(moved);
    ankerl::nanobench::doNotOptimizeAway(original);
  });
}
// Measure the cost of copying a weak reference while a strong owner exists.
template <typename PtrType>
void benchmark_weak_copy(ankerl::nanobench::Bench &bench) {
  using Traits = PointerTraits<PtrType>;
  force_multithreaded(); // ensure real atomic refcounting paths are measured
  auto strong_ptr = Traits::make(TestObject{123});
  typename Traits::weak_type weak_original = strong_ptr;
  const std::string label = std::string(Traits::weak_name()) + " copy";
  bench.run(label, [&] {
    auto weak_copy = weak_original;
    ankerl::nanobench::doNotOptimizeAway(weak_copy);
  });
}
// Measure weak-reference move cost; the reference is moved out and back each
// iteration. Moves touch no refcounts, so force_multithreaded() is omitted.
template <typename PtrType>
void benchmark_weak_move(ankerl::nanobench::Bench &bench) {
  using Traits = PointerTraits<PtrType>;
  auto strong_ptr = Traits::make(TestObject{123});
  typename Traits::weak_type weak_original = strong_ptr;
  const std::string label = std::string(Traits::weak_name()) + " move";
  bench.run(label, [&] {
    auto weak_moved = std::move(weak_original);
    ankerl::nanobench::doNotOptimizeAway(weak_moved);
    weak_original = std::move(weak_moved);
    ankerl::nanobench::doNotOptimizeAway(weak_original);
  });
}
// Measure operator-> access cost (no refcount traffic involved).
template <typename PtrType>
void benchmark_dereference(ankerl::nanobench::Bench &bench) {
  using Traits = PointerTraits<PtrType>;
  auto ptr = Traits::make(TestObject{456});
  const std::string label = std::string(Traits::name()) + " dereference";
  bench.run(label, [&] { ankerl::nanobench::doNotOptimizeAway(ptr->data); });
}
/**
 * @brief Benchmark promoting a live weak reference to a strong one.
 *
 * Calls force_multithreaded() first, like the other refcount-touching
 * benchmarks in this file: lock() performs atomic operations on the strong
 * count, and libstdc++ short-circuits those while the process has never
 * been multi-threaded, which would make the shared_ptr baseline look
 * artificially fast.
 */
template <typename PtrType>
void benchmark_weak_lock_success(ankerl::nanobench::Bench &bench) {
  using Traits = PointerTraits<PtrType>;
  force_multithreaded();
  auto strong_ptr = Traits::make(TestObject{789});
  typename Traits::weak_type weak_ptr = strong_ptr;
  bench.run(std::string(Traits::weak_name()) + " lock success", [&] {
    auto locked = weak_ptr.lock();
    ankerl::nanobench::doNotOptimizeAway(locked);
  });
}
/**
 * @brief Benchmark lock() on an expired weak reference.
 *
 * The strong pointer is created and destroyed in an inner scope, so every
 * lock() attempt fails and returns an empty pointer. force_multithreaded()
 * is called for consistency with the other refcount-touching benchmarks,
 * since lock() still reads the atomic strong count.
 */
template <typename PtrType>
void benchmark_weak_lock_failure(ankerl::nanobench::Bench &bench) {
  using Traits = PointerTraits<PtrType>;
  force_multithreaded();
  typename Traits::weak_type weak_ptr;
  {
    auto strong_ptr = Traits::make(TestObject{999});
    weak_ptr = strong_ptr;
  }
  bench.run(std::string(Traits::weak_name()) + " lock failure", [&] {
    auto locked = weak_ptr.lock();
    ankerl::nanobench::doNotOptimizeAway(locked);
  });
}
// Measure strong-copy cost while (num_threads - 1) background threads hammer
// the same control block with copies of their own.
template <typename PtrType>
void benchmark_multithreaded_copy(ankerl::nanobench::Bench &bench,
                                  int num_threads) {
  using Traits = PointerTraits<PtrType>;
  // Create the shared object outside the benchmark
  auto ptr = Traits::make(TestObject{456});
  // Create background threads that will create contention
  std::atomic<bool> keep_running{true};
  std::vector<std::thread> background_threads;
  for (int i = 0; i < num_threads - 1; ++i) {
    background_threads.emplace_back([&]() {
      // relaxed is fine: the flag carries no data, only a stop request
      while (keep_running.load(std::memory_order_relaxed)) {
        auto copy = ptr;
        ankerl::nanobench::doNotOptimizeAway(copy);
      }
    });
  }
  // Benchmark the foreground thread under contention
  bench.run(std::string(Traits::name()) + " copy under contention", [&] {
    auto copy = ptr;
    ankerl::nanobench::doNotOptimizeAway(copy);
  });
  // Clean up background threads
  // NOTE(review): if bench.run() throws, these joins are skipped and the
  // by-reference captures dangle — presumably acceptable for a benchmark,
  // but confirm nanobench does not throw here.
  keep_running.store(false, std::memory_order_relaxed);
  for (auto &t : background_threads) {
    t.join();
  }
}
// Measure lock() latency while (num_threads - 1) background threads also
// lock the same weak reference, contending on the strong count CAS.
template <typename PtrType>
void benchmark_multithreaded_weak_lock(ankerl::nanobench::Bench &bench,
                                       int num_threads) {
  using Traits = PointerTraits<PtrType>;
  // Create the shared object and weak reference outside the benchmark
  auto strong_ptr = Traits::make(TestObject{789});
  typename Traits::weak_type weak_ptr = strong_ptr;
  // Create background threads that will create contention
  std::atomic<bool> keep_running{true};
  std::vector<std::thread> background_threads;
  for (int i = 0; i < num_threads - 1; ++i) {
    background_threads.emplace_back([&]() {
      // strong_ptr stays alive throughout, so every lock() succeeds
      while (keep_running.load(std::memory_order_relaxed)) {
        auto locked = weak_ptr.lock();
        ankerl::nanobench::doNotOptimizeAway(locked);
      }
    });
  }
  // Benchmark the foreground thread under contention
  bench.run(std::string(Traits::weak_name()) + " lock under contention", [&] {
    auto locked = weak_ptr.lock();
    ankerl::nanobench::doNotOptimizeAway(locked);
  });
  // Clean up background threads
  keep_running.store(false, std::memory_order_relaxed);
  for (auto &t : background_threads) {
    t.join();
  }
}
// Measure weak-copy cost while background threads churn the STRONG count of
// the same control block. Exercises the case where the two counters are
// separate atomics and should not interfere with each other.
template <typename PtrType>
void benchmark_weak_copy_with_strong_contention(ankerl::nanobench::Bench &bench,
                                                int num_threads) {
  using Traits = PointerTraits<PtrType>;
  // Create the shared object and weak reference outside the benchmark
  auto strong_ptr = Traits::make(TestObject{456});
  typename Traits::weak_type weak_ptr = strong_ptr;
  // Create background threads copying the strong pointer
  std::atomic<bool> keep_running{true};
  std::vector<std::thread> background_threads;
  for (int i = 0; i < num_threads - 1; ++i) {
    background_threads.emplace_back([&]() {
      while (keep_running.load(std::memory_order_relaxed)) {
        auto copy = strong_ptr;
        ankerl::nanobench::doNotOptimizeAway(copy);
      }
    });
  }
  // Benchmark weak reference copying under strong reference contention
  bench.run(std::string(Traits::weak_name()) + " copy with strong contention",
            [&] {
              auto weak_copy = weak_ptr;
              ankerl::nanobench::doNotOptimizeAway(weak_copy);
            });
  // Clean up background threads
  keep_running.store(false, std::memory_order_relaxed);
  for (auto &t : background_threads) {
    t.join();
  }
}
// Mirror image of the benchmark above: measure strong-copy cost while
// background threads churn the WEAK count of the same control block.
template <typename PtrType>
void benchmark_strong_copy_with_weak_contention(ankerl::nanobench::Bench &bench,
                                                int num_threads) {
  using Traits = PointerTraits<PtrType>;
  // Create the shared object and weak reference outside the benchmark
  auto strong_ptr = Traits::make(TestObject{789});
  typename Traits::weak_type weak_ptr = strong_ptr;
  // Create background threads copying the weak pointer
  std::atomic<bool> keep_running{true};
  std::vector<std::thread> background_threads;
  for (int i = 0; i < num_threads - 1; ++i) {
    background_threads.emplace_back([&]() {
      while (keep_running.load(std::memory_order_relaxed)) {
        auto weak_copy = weak_ptr;
        ankerl::nanobench::doNotOptimizeAway(weak_copy);
      }
    });
  }
  // Benchmark strong reference copying under weak reference contention
  bench.run(std::string(Traits::name()) + " copy with weak contention", [&] {
    auto strong_copy = strong_ptr;
    ankerl::nanobench::doNotOptimizeAway(strong_copy);
  });
  // Clean up background threads
  keep_running.store(false, std::memory_order_relaxed);
  for (auto &t : background_threads) {
    t.join();
  }
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// Single-threaded comparisons. Each case measures one operation for
// std::shared_ptr first and then Ref; bench.relative(true) makes nanobench
// report later entries as a percentage of the first (shared_ptr) baseline.
// ---------------------------------------------------------------------------
TEST_CASE("Creation performance comparison") {
  ankerl::nanobench::Bench bench;
  bench.title("Creation performance comparison");
  bench.relative(true);
  benchmark_creation<std::shared_ptr<TestObject>>(bench);
  benchmark_creation<Ref<TestObject>>(bench);
}
TEST_CASE("Copy performance comparison") {
  ankerl::nanobench::Bench bench;
  bench.title("Copy performance comparison");
  bench.relative(true);
  benchmark_copy<std::shared_ptr<TestObject>>(bench);
  benchmark_copy<Ref<TestObject>>(bench);
}
TEST_CASE("Move performance comparison") {
  ankerl::nanobench::Bench bench;
  bench.title("Move performance comparison");
  bench.relative(true);
  benchmark_move<std::shared_ptr<TestObject>>(bench);
  benchmark_move<Ref<TestObject>>(bench);
}
TEST_CASE("Weak copy performance comparison") {
  ankerl::nanobench::Bench bench;
  bench.title("Weak copy performance comparison");
  bench.relative(true);
  benchmark_weak_copy<std::shared_ptr<TestObject>>(bench);
  benchmark_weak_copy<Ref<TestObject>>(bench);
}
TEST_CASE("Weak move performance comparison") {
  ankerl::nanobench::Bench bench;
  bench.title("Weak move performance comparison");
  bench.relative(true);
  benchmark_weak_move<std::shared_ptr<TestObject>>(bench);
  benchmark_weak_move<Ref<TestObject>>(bench);
}
TEST_CASE("Dereference performance comparison") {
  ankerl::nanobench::Bench bench;
  bench.title("Dereference performance comparison");
  bench.relative(true);
  benchmark_dereference<std::shared_ptr<TestObject>>(bench);
  benchmark_dereference<Ref<TestObject>>(bench);
}
TEST_CASE("Weak lock success performance comparison") {
  ankerl::nanobench::Bench bench;
  bench.title("Weak lock success performance comparison");
  bench.relative(true);
  benchmark_weak_lock_success<std::shared_ptr<TestObject>>(bench);
  benchmark_weak_lock_success<Ref<TestObject>>(bench);
}
TEST_CASE("Weak lock failure performance comparison") {
  ankerl::nanobench::Bench bench;
  bench.title("Weak lock failure performance comparison");
  bench.relative(true);
  benchmark_weak_lock_failure<std::shared_ptr<TestObject>>(bench);
  benchmark_weak_lock_failure<Ref<TestObject>>(bench);
}
// ---------------------------------------------------------------------------
// Contention comparisons: num_threads - 1 background threads churn on the
// same control block while the foreground thread is measured.
// minEpochIterations(500000) — presumably chosen so each epoch is long
// enough that background threads are running during measurement; confirm
// if results look noisy.
// ---------------------------------------------------------------------------
TEST_CASE("Copy performance under contention") {
  const int num_threads = 3;
  ankerl::nanobench::Bench bench;
  bench.title("Copy performance under contention");
  bench.relative(true);
  bench.minEpochIterations(500000);
  benchmark_multithreaded_copy<std::shared_ptr<TestObject>>(bench, num_threads);
  benchmark_multithreaded_copy<Ref<TestObject>>(bench, num_threads);
}
TEST_CASE("Weak lock performance under contention") {
  const int num_threads = 3;
  ankerl::nanobench::Bench bench;
  bench.title("Weak lock performance under contention");
  bench.relative(true);
  bench.minEpochIterations(500000);
  benchmark_multithreaded_weak_lock<std::shared_ptr<TestObject>>(bench,
                                                                 num_threads);
  benchmark_multithreaded_weak_lock<Ref<TestObject>>(bench, num_threads);
}
TEST_CASE("Weak copy performance under strong reference contention") {
  const int num_threads = 3;
  ankerl::nanobench::Bench bench;
  bench.title("Weak copy performance under strong reference contention");
  bench.relative(true);
  bench.minEpochIterations(500000);
  benchmark_weak_copy_with_strong_contention<std::shared_ptr<TestObject>>(
      bench, num_threads);
  benchmark_weak_copy_with_strong_contention<Ref<TestObject>>(bench,
                                                              num_threads);
}
TEST_CASE("Strong copy performance under weak reference contention") {
  const int num_threads = 3;
  ankerl::nanobench::Bench bench;
  bench.title("Strong copy performance under weak reference contention");
  bench.relative(true);
  bench.minEpochIterations(500000);
  benchmark_strong_copy_with_weak_contention<std::shared_ptr<TestObject>>(
      bench, num_threads);
  benchmark_strong_copy_with_weak_contention<Ref<TestObject>>(bench,
                                                              num_threads);
}

View File

@@ -9,7 +9,7 @@
5. [Common Patterns](#common-patterns)
6. [Reference](#reference)
**See also:** [style.md](style.md) for comprehensive C++ coding standards and conventions.
**IMPORTANT:** Read [style.md](style.md) first - contains mandatory C++ coding standards, threading rules, and testing guidelines that must be followed for all code changes.
---

View File

@@ -26,6 +26,11 @@ thread_local auto bytes_written =
metric::create_counter("weaseldb_bytes_written_total",
"Total number of bytes written to clients")
.create({});
thread_local auto write_eagain_failures =
metric::create_counter(
"weaseldb_write_eagain_failures_total",
"Total number of write operations that failed with EAGAIN")
.create({});
} // namespace
// Static thread-local storage for iovec buffer
@@ -33,10 +38,14 @@ static thread_local std::vector<struct iovec> g_iovec_buffer{IOV_MAX};
Connection::Connection(struct sockaddr_storage addr, int fd, int64_t id,
size_t epoll_index, ConnectionHandler *handler,
Server &server)
WeakRef<Server> server)
: fd_(fd), id_(id), epoll_index_(epoll_index), addr_(addr), arena_(),
handler_(handler), server_(server.weak_from_this()) {
server.active_connections_.fetch_add(1, std::memory_order_relaxed);
handler_(handler), server_(std::move(server)) {
auto server_ref = server_.lock();
// This should only be called from a member of Server itself, so I should
// hope it's alive.
assert(server_ref);
server_ref->active_connections_.fetch_add(1, std::memory_order_relaxed);
// Increment connection metrics using thread-local instances
connections_total.inc();
@@ -50,6 +59,7 @@ Connection::~Connection() {
if (handler_) {
handler_->on_connection_closed(*this);
}
// Server may legitimately be gone now
if (auto server_ptr = server_.lock()) {
server_ptr->active_connections_.fetch_sub(1, std::memory_order_relaxed);
}
@@ -130,6 +140,8 @@ bool Connection::writeBytes() {
continue; // Standard practice: retry on signal interruption
}
if (errno == EAGAIN) {
// Increment EAGAIN failure metric
write_eagain_failures.inc();
// Increment bytes written metric before returning
if (total_bytes_written > 0) {
bytes_written.inc(total_bytes_written);

View File

@@ -3,13 +3,13 @@
#include <cassert>
#include <cstring>
#include <deque>
#include <memory>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>
#include "arena.hpp"
#include "connection_handler.hpp"
#include "reference.hpp"
#ifndef __has_feature
#define __has_feature(x) 0
@@ -330,7 +330,8 @@ private:
* @param server Reference to server associated with this connection
*/
Connection(struct sockaddr_storage addr, int fd, int64_t id,
size_t epoll_index, ConnectionHandler *handler, Server &server);
size_t epoll_index, ConnectionHandler *handler,
WeakRef<Server> server);
// Networking interface - only accessible by Server
int readBytes(char *buf, size_t buffer_size);
@@ -338,8 +339,8 @@ private:
// Direct access methods for Server
int getFd() const { return fd_; }
bool hasMessages() const { return !messages_.empty(); }
bool shouldClose() const { return closeConnection_; }
bool has_messages() const { return !messages_.empty(); }
bool should_close() const { return closeConnection_; }
size_t getEpollIndex() const { return epoll_index_; }
const int fd_;
const int64_t id_;
@@ -347,7 +348,7 @@ private:
struct sockaddr_storage addr_; // sockaddr_storage handles IPv4/IPv6
Arena arena_;
ConnectionHandler *handler_;
std::weak_ptr<Server> server_; // Weak reference to server for safe cleanup
WeakRef<Server> server_; // Weak reference to server for safe cleanup
std::deque<std::string_view, ArenaStlAllocator<std::string_view>> messages_{
ArenaStlAllocator<std::string_view>{&arena_}};

View File

@@ -4,6 +4,7 @@
#include "metric.hpp"
#include "perfetto_categories.hpp"
#include "process_collector.hpp"
#include "reference.hpp"
#include "server.hpp"
#include <csignal>
#include <cstring>
@@ -181,7 +182,7 @@ int main(int argc, char *argv[]) {
#endif
// Register the process collector for default metrics.
metric::register_collector(std::make_shared<ProcessCollector>());
metric::register_collector(make_ref<ProcessCollector>());
std::string config_file = "config.toml";

View File

@@ -444,7 +444,7 @@ struct Metric {
}
static auto &get_collectors() {
using CollectorRegistry = std::vector<std::shared_ptr<Collector>>;
using CollectorRegistry = std::vector<Ref<Collector>>;
static CollectorRegistry *collectors = new CollectorRegistry();
return *collectors;
}
@@ -1803,7 +1803,7 @@ void reset_metrics_for_testing() {
// when threads exit naturally
}
void register_collector(std::shared_ptr<Collector> collector) {
void register_collector(Ref<Collector> collector) {
std::unique_lock<std::mutex> _{Metric::mutex};
++Metric::registration_version;
Metric::get_collectors().push_back(std::move(collector));

View File

@@ -51,6 +51,7 @@
#include <vector>
#include "arena.hpp"
#include "reference.hpp"
namespace metric {
@@ -255,12 +256,12 @@ struct Collector {
/**
* @brief Register a collector with the metrics system.
*
* The system will hold a shared_ptr to the collector and call its collect()
* The system will hold a Ref to the collector and call its collect()
* method during each metric rendering.
*
* @param collector A shared_ptr to the collector to be registered.
* @param collector A Ref to the collector to be registered.
*/
void register_collector(std::shared_ptr<Collector> collector);
void register_collector(Ref<Collector> collector);
// Note: Histograms do not support callbacks due to their multi-value nature
// (buckets + sum + count). Use static histogram metrics only.

579
src/reference.hpp Normal file
View File

@@ -0,0 +1,579 @@
#pragma once
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <type_traits>
/**
* @brief High-performance thread-safe reference counting system
*
* This library provides custom smart pointers with shared/weak semantics,
* designed for better performance than std::shared_ptr/weak_ptr.
*
* Key features:
* - Thread-safe reference counting using atomic operations
* - Weak references to break circular dependencies
* - Single allocation for better cache locality
* - Optimized for copy/move operations
* - Compatible with std::shared_ptr semantics
*
* Basic usage:
* @code
* auto obj = make_ref<MyClass>(args...); // Create managed object
* auto copy = obj; // Copy (thread-safe)
* WeakRef<MyClass> weak = obj; // Create weak reference
* auto locked = weak.lock(); // Try to promote to strong
* @endcode
*
* Thread safety: All operations are thread-safe. Multiple threads can
* safely copy, move, and destroy references to the same object.
*/
namespace detail {
/**
 * @brief Shared bookkeeping for one make_ref allocation.
 *
 * Uses a "biased" weak count: the block starts at strong=1, weak=1, where
 * the extra weak unit is owned collectively by all strong references. The
 * last strong reference to die releases that bias, so the block is freed
 * only when no strong or weak observers remain.
 */
struct ControlBlock {
  std::atomic<uint32_t> strong_count;
  std::atomic<uint32_t> weak_count;
  ControlBlock()
      : strong_count(1), weak_count(1) {
  } // Start with 1 strong, 1 weak (biased)
  /**
   * @brief Increment strong reference count
   * @return Previous strong count
   */
  uint32_t increment_strong() noexcept {
    // Relaxed suffices: the caller already holds a valid reference, so no
    // ordering with the object's contents is needed to take another one.
    return strong_count.fetch_add(1, std::memory_order_relaxed);
  }
  /**
   * @brief Decrement strong reference count
   * @return Previous strong count
   */
  uint32_t decrement_strong() noexcept {
    // acq_rel: release publishes this thread's writes to the object before
    // any destruction; acquire lets the thread that sees the count reach
    // zero observe every other thread's writes before destroying.
    return strong_count.fetch_sub(1, std::memory_order_acq_rel);
  }
  /**
   * @brief Increment weak reference count
   * @return Previous weak count
   */
  uint32_t increment_weak() noexcept {
    return weak_count.fetch_add(1, std::memory_order_relaxed);
  }
  /**
   * @brief Decrement weak reference count
   * @return Previous weak count
   */
  uint32_t decrement_weak() noexcept {
    // Same acq_rel rationale as decrement_strong, here guarding the free of
    // the control block itself.
    return weak_count.fetch_sub(1, std::memory_order_acq_rel);
  }
};
} // namespace detail
/**
* @brief Strong reference to a shared object (similar to std::shared_ptr)
*
* Ref<T> manages shared ownership of an object. The object is automatically
* destroyed when the last Ref pointing to it is destroyed.
*
* Usage:
* - Use make_ref<T>() to create new objects
* - Copy/assign to share ownership
* - Use get(), operator*, operator-> to access the object
* - Use operator bool() to check if valid
* - Use reset() to release ownership
*
* Limitations compared to std::shared_ptr:
* - Cannot take ownership of raw pointers
* - Objects can only be created via make_ref<T>() for proper memory layout
* - No custom deleter support
* - No enable_shared_from_this / shared_from_this() integration
* - No aliasing constructor (sharing ownership with different pointer)
* - No array support (Ref<T[]>)
* - No atomic operations (atomic_load, atomic_store, etc.)
*
* Thread safety: All operations are thread-safe. The managed object
* itself is NOT automatically thread-safe.
*/
template <typename T> struct Ref {
  /**
   * @brief Get raw pointer to managed object
   */
  T *get() const noexcept { return ptr; }
  /**
   * @brief Dereference operator
   *
   * Precondition: the Ref is non-empty; dereferencing an empty Ref is UB.
   */
  T &operator*() const { return *ptr; }
  /**
   * @brief Arrow operator
   *
   * Precondition: the Ref is non-empty.
   */
  T *operator->() const { return ptr; }
  /**
   * @brief Check if Ref is valid (not empty)
   */
  explicit operator bool() const noexcept { return ptr != nullptr; }
  /**
   * @brief Destructor - decrements strong reference count
   */
  ~Ref() { release(); }
  /**
   * @brief Copy constructor - increments strong reference count
   */
  Ref(const Ref &other) noexcept
      : ptr(other.ptr), control_block(other.control_block) {
    if (control_block) {
      control_block->increment_strong();
    }
  }
  /**
   * @brief Converting copy constructor for polymorphism (Derived -> Base)
   */
  template <typename U>
  Ref(const Ref<U> &other) noexcept
    requires std::is_convertible_v<U *, T *>
      : ptr(other.ptr), control_block(other.control_block) {
    if (control_block) {
      control_block->increment_strong();
    }
  }
  /**
   * @brief Copy assignment operator
   */
  Ref &operator=(const Ref &other) noexcept {
    if (this != &other) {
      release();
      ptr = other.ptr;
      control_block = other.control_block;
      if (control_block) {
        control_block->increment_strong();
      }
    }
    return *this;
  }
  /**
   * @brief Converting assignment operator for polymorphism (Derived -> Base)
   *
   * No self-assignment check is needed: `other` is a distinct object of a
   * different instantiation, and it keeps the strong count positive while
   * release() runs, so the shared object cannot be destroyed mid-assignment
   * even when both sides share a control block.
   */
  template <typename U>
  Ref &operator=(const Ref<U> &other) noexcept
    requires std::is_convertible_v<U *, T *>
  {
    release();
    ptr = other.ptr;
    control_block = other.control_block;
    if (control_block) {
      control_block->increment_strong();
    }
    return *this;
  }
  /**
   * @brief Move constructor - transfers ownership
   */
  Ref(Ref &&other) noexcept
      : ptr(other.ptr), control_block(other.control_block) {
    other.ptr = nullptr;
    other.control_block = nullptr;
  }
  /**
   * @brief Converting move constructor for polymorphism (Derived -> Base)
   */
  template <typename U>
  Ref(Ref<U> &&other) noexcept
    requires std::is_convertible_v<U *, T *>
      : ptr(other.ptr), control_block(other.control_block) {
    other.ptr = nullptr;
    other.control_block = nullptr;
  }
  /**
   * @brief Move assignment operator
   */
  Ref &operator=(Ref &&other) noexcept {
    if (this != &other) {
      release();
      ptr = other.ptr;
      control_block = other.control_block;
      other.ptr = nullptr;
      other.control_block = nullptr;
    }
    return *this;
  }
  /**
   * @brief Converting move assignment operator for polymorphism (Derived ->
   * Base)
   */
  template <typename U>
  Ref &operator=(Ref<U> &&other) noexcept
    requires std::is_convertible_v<U *, T *>
  {
    release();
    ptr = other.ptr;
    control_block = other.control_block;
    other.ptr = nullptr;
    other.control_block = nullptr;
    return *this;
  }
  /**
   * @brief Reset to empty state
   */
  void reset() noexcept {
    release();
    ptr = nullptr;
    control_block = nullptr;
  }
  /**
   * @brief Equality comparison
   *
   * Compares control blocks; since there is no aliasing constructor, this is
   * equivalent to comparing object identity.
   */
  bool operator==(const Ref &other) const noexcept {
    return control_block == other.control_block;
  }
  /**
   * @brief Inequality comparison
   */
  bool operator!=(const Ref &other) const noexcept { return !(*this == other); }
  /**
   * @brief Default constructor - creates empty Ref
   */
  Ref() : ptr(nullptr), control_block(nullptr) {}

private:
  // Only make_ref and WeakRef::lock() construct a non-empty Ref directly.
  explicit Ref(T *object_ptr, detail::ControlBlock *cb)
      : ptr(object_ptr), control_block(cb) {}
  T *ptr;                              // cached object pointer, null iff empty
  detail::ControlBlock *control_block; // shared counters, null iff empty
  /**
   * @brief Release current reference and handle cleanup
   */
  void release() noexcept {
    if (control_block) {
      uint32_t prev_strong = control_block->decrement_strong();
      // If this was the last strong reference, destroy the object
      if (prev_strong == 1) {
        // We need to call the destructor before we decrement the weak count, to
        // account for the possibility that T has a WeakRef to itself.
        ptr->~T();
        // Release the bias - decrement weak count for strong references
        uint32_t prev_weak = control_block->decrement_weak();
        // If weak count hits 0, destroy and free control block
        if (prev_weak == 1) {
          control_block->~ControlBlock();
          std::free(control_block);
        }
      }
    }
  }
  template <typename U, typename... Args>
  friend Ref<U> make_ref(Args &&...args);
  template <typename U> friend struct WeakRef;
  template <typename U> friend struct Ref;
};
/**
* @brief Weak reference to a shared object (similar to std::weak_ptr)
*
* WeakRef<T> holds a non-owning reference to an object managed by Ref<T>.
* It can be used to break circular dependencies and safely observe objects
* that might be destroyed by other threads.
*
* Usage:
* - Create from Ref<T> to observe without owning
* - Use lock() to attempt promotion to Ref<T>
* - Returns empty Ref<T> if object was already destroyed
* - Use reset() to stop observing
*
* Self-referencing pattern: Objects can safely contain WeakRef members
* pointing to themselves. The implementation ensures proper destruction
* order to prevent use-after-free when the object destructor runs.
*
* Thread safety: All operations are thread-safe. The observed object
* may be destroyed by other threads at any time.
*/
template <typename T> struct WeakRef {
/**
* @brief Attempt to promote WeakRef to Ref
* @return Valid Ref if object still alive, empty Ref otherwise
*/
Ref<T> lock() const {
if (!control_block) {
return Ref<T>();
}
// Try to increment strong count if it's not zero
uint32_t expected_strong =
control_block->strong_count.load(std::memory_order_relaxed);
while (expected_strong > 0) {
// Try to increment the strong count
if (control_block->strong_count.compare_exchange_weak(
expected_strong, expected_strong + 1, std::memory_order_acquire,
std::memory_order_relaxed)) {
// Success - we incremented the strong count
return Ref<T>(get_object_ptr(), control_block);
}
// CAS failed, expected_strong now contains the current value, retry
}
// Strong count was 0, object is being destroyed
return Ref<T>();
}
/**
* @brief Destructor - decrements weak reference count
*/
~WeakRef() { release(); }
/**
* @brief Copy constructor from WeakRef
*/
WeakRef(const WeakRef &other) noexcept : control_block(other.control_block) {
if (control_block) {
control_block->increment_weak();
}
}
/**
* @brief Copy constructor from Ref
*/
WeakRef(const Ref<T> &ref) noexcept : control_block(ref.control_block) {
if (control_block) {
control_block->increment_weak();
}
}
/**
* @brief Converting copy constructor from WeakRef for polymorphism
*/
template <typename U>
WeakRef(const WeakRef<U> &other) noexcept
requires std::is_convertible_v<U *, T *>
: control_block(other.control_block) {
if (control_block) {
control_block->increment_weak();
}
}
/**
* @brief Converting copy constructor from Ref for polymorphism
*/
template <typename U>
WeakRef(const Ref<U> &ref) noexcept
requires std::is_convertible_v<U *, T *>
: control_block(ref.control_block) {
if (control_block) {
control_block->increment_weak();
}
}
/**
* @brief Converting copy assignment from WeakRef for polymorphism
*/
template <typename U>
WeakRef &operator=(const WeakRef<U> &other) noexcept
requires std::is_convertible_v<U *, T *>
{
release();
control_block = other.control_block;
if (control_block) {
control_block->increment_weak();
}
return *this;
}
/**
* @brief Converting copy assignment from Ref for polymorphism
*/
template <typename U>
WeakRef &operator=(const Ref<U> &ref) noexcept
requires std::is_convertible_v<U *, T *>
{
release();
control_block = ref.control_block;
if (control_block) {
control_block->increment_weak();
}
return *this;
}
/**
* @brief Converting move constructor from WeakRef for polymorphism
*/
template <typename U>
WeakRef(WeakRef<U> &&other) noexcept
requires std::is_convertible_v<U *, T *>
: control_block(other.control_block) {
other.control_block = nullptr;
}
/**
* @brief Converting move assignment from WeakRef for polymorphism
*/
template <typename U>
WeakRef &operator=(WeakRef<U> &&other) noexcept
requires std::is_convertible_v<U *, T *>
{
release();
control_block = other.control_block;
other.control_block = nullptr;
return *this;
}
/**
* @brief Copy assignment from WeakRef
*/
WeakRef &operator=(const WeakRef &other) noexcept {
if (this != &other) {
release();
control_block = other.control_block;
if (control_block) {
control_block->increment_weak();
}
}
return *this;
}
/**
* @brief Copy assignment from Ref
*/
WeakRef &operator=(const Ref<T> &ref) noexcept {
release();
control_block = ref.control_block;
if (control_block) {
control_block->increment_weak();
}
return *this;
}
/**
* @brief Move constructor
*/
WeakRef(WeakRef &&other) noexcept : control_block(other.control_block) {
other.control_block = nullptr;
}
/**
* @brief Move assignment
*/
WeakRef &operator=(WeakRef &&other) noexcept {
if (this != &other) {
release();
control_block = other.control_block;
other.control_block = nullptr;
}
return *this;
}
/**
* @brief Reset to empty state
*/
void reset() noexcept {
release();
control_block = nullptr;
}
/**
* @brief Default constructor - creates empty WeakRef
*/
WeakRef() : control_block(nullptr) {}
private:
explicit WeakRef(detail::ControlBlock *cb) : control_block(cb) {}
detail::ControlBlock *control_block;
// Recompute the object's address from the control block pointer.
// Layout contract: the object lives immediately after the control block,
// whose size is rounded up to alignof(T). This arithmetic MUST mirror the
// placement math in make_ref().
T *get_object_ptr() const {
  if (control_block == nullptr) {
    return nullptr;
  }
  constexpr size_t align_mask = alignof(T) - 1;
  constexpr size_t obj_offset =
      (sizeof(detail::ControlBlock) + align_mask) & ~align_mask;
  char *base = reinterpret_cast<char *>(control_block);
  return reinterpret_cast<T *>(base + obj_offset);
}
/**
 * @brief Release the current weak reference; free the control block when
 *        this was the last outstanding weak count.
 *
 * The weak count is biased: the strong side logically owns +1 weak count
 * (see the commit history / make_ref), so the count can only reach zero
 * after the last strong reference is already gone. decrement_weak()
 * returns the previous value, so a return of 1 means we just dropped the
 * final count and are solely responsible for cleanup.
 *
 * NOTE: control_block is deliberately left dangling here; every caller in
 * this class overwrites or clears it immediately afterwards.
 */
void release() noexcept {
if (control_block) {
uint32_t prev_weak = control_block->decrement_weak();
// If weak count hits 0, destroy and free control block
if (prev_weak == 1) {
// ~ControlBlock() is trivial today; called anyway so this stays
// correct if the control block ever grows non-trivial members.
control_block->~ControlBlock();
// Storage came from std::aligned_alloc in make_ref, so std::free
// is the matching deallocation.
std::free(control_block);
}
}
}
template <typename U> friend struct Ref;
template <typename U> friend struct WeakRef;
};
/**
 * @brief Create a new managed object wrapped in Ref<T>.
 *
 * The only way to construct a Ref<T>. Performs a single allocation holding
 * both the control block and the object, improving cache locality:
 *
 *   [ControlBlock | padding to alignof(T) | T]
 *
 * get_object_ptr() in WeakRef relies on exactly this layout.
 *
 * @tparam T Type of object to create
 * @tparam Args Types of constructor arguments
 * @param args Arguments forwarded to T's initialization
 * @return Ref<T> managing the newly created object
 *
 * Example:
 * @code
 * auto obj = make_ref<MyClass>(arg1, arg2);
 * auto empty_vec = make_ref<std::vector<int>>();
 * @endcode
 *
 * Thread safety: Safe to call from multiple threads simultaneously.
 * On allocation failure the process aborts (no exception is thrown).
 */
template <typename T, typename... Args> Ref<T> make_ref(Args &&...args) {
  constexpr size_t obj_align = alignof(T);
  // Offset of T: control block size rounded up to T's alignment.
  constexpr size_t obj_offset =
      (sizeof(detail::ControlBlock) + obj_align - 1) & ~(obj_align - 1);
  constexpr size_t block_align =
      std::max(alignof(detail::ControlBlock), obj_align);
  // std::aligned_alloc requires the size to be a multiple of the alignment.
  constexpr size_t alloc_size =
      (obj_offset + sizeof(T) + block_align - 1) & ~(block_align - 1);
  void *raw = std::aligned_alloc(block_align, alloc_size);
  if (raw == nullptr) {
    std::fprintf(stderr, "Out of memory\n");
    std::abort();
  }
  auto *cb = new (raw) detail::ControlBlock();
  // NOTE(review): brace-init means list-initialization — e.g. for
  // std::vector<int> the initializer_list constructor is selected, which
  // differs from std::make_shared's paren-init. Kept as-is since callers
  // may rely on it; confirm before changing to T(...).
  T *obj = new (static_cast<char *>(raw) + obj_offset)
      T{std::forward<Args>(args)...};
  return Ref<T>(obj, cb);
}

View File

@@ -21,12 +21,12 @@
// Static thread-local storage for read buffer (used across different functions)
static thread_local std::vector<char> g_read_buffer;
std::shared_ptr<Server> Server::create(const weaseldb::Config &config,
ConnectionHandler &handler,
const std::vector<int> &listen_fds) {
// Use std::shared_ptr constructor with private access
// We can't use make_shared here because constructor is private
return std::shared_ptr<Server>(new Server(config, handler, listen_fds));
Ref<Server> Server::create(const weaseldb::Config &config,
ConnectionHandler &handler,
const std::vector<int> &listen_fds) {
auto result = make_ref<Server>(config, handler, listen_fds);
result->self_ = result;
return result;
}
Server::Server(const weaseldb::Config &config, ConnectionHandler &handler,
@@ -162,7 +162,7 @@ void Server::receiveConnectionBack(std::unique_ptr<Connection> connection) {
// Re-add the connection to epoll for continued processing
struct epoll_event event{};
if (!connection->hasMessages()) {
if (!connection->has_messages()) {
event.events = EPOLLIN | EPOLLONESHOT;
} else {
event.events = EPOLLOUT | EPOLLONESHOT;
@@ -218,7 +218,7 @@ int Server::create_local_connection() {
// Create Connection object
auto connection = std::unique_ptr<Connection>(new Connection(
addr, server_fd, connection_id_.fetch_add(1, std::memory_order_relaxed),
epoll_index, &handler_, *this));
epoll_index, &handler_, self_));
// Store in registry
connection_registry_.store(server_fd, std::move(connection));
@@ -422,7 +422,7 @@ void Server::start_io_threads(std::vector<std::thread> &threads) {
batch[batch_count] = std::unique_ptr<Connection>(new Connection(
addr, fd,
connection_id_.fetch_add(1, std::memory_order_relaxed),
epoll_index, &handler_, *this));
epoll_index, &handler_, self_));
batch_events[batch_count] =
EPOLLIN; // New connections always start with read
batch_count++;
@@ -482,12 +482,13 @@ void Server::process_connection_reads(std::unique_ptr<Connection> &conn,
}
void Server::process_connection_writes(std::unique_ptr<Connection> &conn,
int events) {
int /*events*/) {
assert(conn);
// Send immediately if we have outgoing messages (either from EPOLLOUT or
// after reading)
if ((events & EPOLLOUT) || ((events & EPOLLIN) && conn->hasMessages())) {
bool had_messages = conn->hasMessages();
// For simplicity, we always attempt to write when an event fires. We could be
// more precise and skip the write if we detect that we've already seen EAGAIN
// on this connection and we don't have EPOLLOUT.
if (conn->has_messages()) {
bool had_messages = conn->has_messages();
bool error = conn->writeBytes();
if (error) {
@@ -504,7 +505,7 @@ void Server::process_connection_writes(std::unique_ptr<Connection> &conn,
}
// Check if buffer became empty (transition from non-empty -> empty)
if (had_messages && !conn->hasMessages()) {
if (had_messages && !conn->has_messages()) {
handler_.on_write_buffer_drained(conn);
// If handler took ownership (conn is now null), return
if (!conn) {
@@ -513,7 +514,7 @@ void Server::process_connection_writes(std::unique_ptr<Connection> &conn,
}
// Check if we should close the connection according to application
if (!conn->hasMessages() && conn->shouldClose()) {
if (!conn->has_messages() && conn->should_close()) {
conn.reset(); // Connection should be closed
return;
}
@@ -547,7 +548,7 @@ void Server::process_connection_batch(
int fd = conn_ptr->getFd();
struct epoll_event event{};
if (!conn_ptr->hasMessages()) {
if (!conn_ptr->has_messages()) {
event.events = EPOLLIN | EPOLLONESHOT;
} else {
event.events = EPOLLOUT | EPOLLONESHOT;

View File

@@ -9,6 +9,7 @@
#include "config.hpp"
#include "connection_handler.hpp"
#include "connection_registry.hpp"
#include "reference.hpp"
/**
* High-performance multi-threaded server for handling network connections.
@@ -28,18 +29,18 @@
*
* IMPORTANT: Server uses a factory pattern and MUST be created via
* Server::create(). This ensures:
* - Proper shared_ptr semantics for enable_shared_from_this
* - Safe weak_ptr references from Connection objects
* - Proper Ref<Server> semantics for reference counting
* - Safe WeakRef<Server> references from Connection objects
* - Prevention of accidental stack allocation that would break safety
* guarantees
*/
struct Server : std::enable_shared_from_this<Server> {
struct Server {
/**
* Factory method to create a Server instance.
*
* This is the only way to create a Server - ensures proper shared_ptr
* This is the only way to create a Server - ensures proper Ref<Server>
* semantics and prevents accidental stack allocation that would break
* weak_ptr safety.
* WeakRef<Server> safety.
*
* @param config Server configuration (threads, ports, limits, etc.)
* @param handler Protocol handler for processing connection data
@@ -47,11 +48,11 @@ struct Server : std::enable_shared_from_this<Server> {
* Server takes ownership and will close them on
* destruction. Server will set these to non-blocking mode for safe epoll
* usage. Empty vector means no listening sockets.
* @return shared_ptr to the newly created Server
* @return Ref to the newly created Server
*/
static std::shared_ptr<Server> create(const weaseldb::Config &config,
ConnectionHandler &handler,
const std::vector<int> &listen_fds);
static Ref<Server> create(const weaseldb::Config &config,
ConnectionHandler &handler,
const std::vector<int> &listen_fds);
/**
* Destructor ensures proper cleanup of all resources.
@@ -121,6 +122,10 @@ private:
*/
explicit Server(const weaseldb::Config &config, ConnectionHandler &handler,
const std::vector<int> &listen_fds);
friend Ref<Server> make_ref<Server>(const weaseldb::Config &config,
ConnectionHandler &handler,
const std::vector<int> &listen_fds);
WeakRef<Server> self_;
const weaseldb::Config &config_;
ConnectionHandler &handler_;

View File

@@ -591,13 +591,24 @@ TEST_CASE("Server accepts connections") {
#### Threading Checklist for Tests/Benchmarks
**MANDATORY: Before writing any `std::thread` or `threads.emplace_back()`:**
**Common threading principles (all concurrent code):**
- **Count total threads** - Include main/benchmark thread in count
- **Always assume concurrent execution needed** - Tests/benchmarks require real concurrency
- **Add synchronization primitive** - `std::latch start_latch{N}` (most common), `std::barrier`, or similar where N = total concurrent threads
- **Each thread synchronizes before doing work** - e.g., `start_latch.arrive_and_wait()` or `barrier.arrive_and_wait()`
- **Main thread synchronizes before measurement/execution** - ensures all threads start simultaneously
1. **Count total threads** - Include main/benchmark thread in count
2. **Always assume concurrent execution needed** - Tests/benchmarks require real concurrency
3. **Add `std::latch start_latch{N}`** where N = total concurrent threads
4. **Each thread calls `start_latch.arrive_and_wait()`** before doing work
5. **Main/benchmark thread calls `start_latch.arrive_and_wait()`** before measurement
**Test-specific:**
- **Perform many operations per thread creation** - amortize thread creation cost and increase chances of hitting race conditions
- **Pattern: Create test that spawns threads and runs many operations, then run that test many times** - amortizes thread creation cost while providing fresh test instances
- **Run 100-10000 operations per test, and 100-10000 test iterations** - maximizes chances of hitting race conditions
- **Always run with ThreadSanitizer** - compile with `-fsanitize=thread`
**Benchmark-specific:**
- **NEVER create threads inside the benchmark measurement** - creates thread creation/destruction overhead, not contention
- **Create background threads OUTSIDE the benchmark** that run continuously during measurement
- **Use `std::atomic<bool> keep_running` to cleanly shut down background threads after benchmark**
- **Measure only the foreground operation under real contention from background threads**
**Red flags to catch immediately:**
- ❌ Creating threads in a loop without `std::latch`
@@ -609,18 +620,18 @@ TEST_CASE("Server accepts connections") {
```cpp
// BAD: Race likely over before threads start
std::atomic<int> counter{0};
int counter = 0;
for (int i = 0; i < 4; ++i) {
threads.emplace_back([&]() { counter++; }); // Probably sequential
}
// GOOD: Force threads to race simultaneously
std::atomic<int> counter{0};
int counter = 0;
std::latch start_latch{4};
for (int i = 0; i < 4; ++i) {
threads.emplace_back([&]() {
start_latch.arrive_and_wait(); // All threads start together
counter++; // Now they actually race
counter++; // Now they actually race (data race on non-atomic)
});
}
```

461
tests/test_reference.cpp Normal file
View File

@@ -0,0 +1,461 @@
#include <barrier>
#include <doctest/doctest.h>
#include <latch>
#include <thread>
#include <vector>
#include "reference.hpp"
namespace {
struct TestObject {
int value;
explicit TestObject(int v) : value(v) {}
};
struct Node {
int data;
Ref<Node> next;
WeakRef<Node> parent;
explicit Node(int d) : data(d) {}
};
// Classes for polymorphism testing
struct Base {
int base_value;
explicit Base(int v) : base_value(v) {}
virtual ~Base() = default;
virtual int get_value() const { return base_value; }
};
struct Derived : public Base {
int derived_value;
explicit Derived(int base_v, int derived_v)
: Base(base_v), derived_value(derived_v) {}
int get_value() const override { return base_value + derived_value; }
};
struct AnotherDerived : public Base {
int another_value;
explicit AnotherDerived(int base_v, int another_v)
: Base(base_v), another_value(another_v) {}
int get_value() const override { return base_value * another_value; }
};
} // anonymous namespace
TEST_CASE("Ref basic functionality") {
SUBCASE("make_ref creates valid Ref") {
auto ref = make_ref<TestObject>(42);
CHECK(ref);
CHECK(ref.get() != nullptr);
CHECK(ref->value == 42);
CHECK((*ref).value == 42);
}
SUBCASE("copy construction increments reference count") {
auto ref1 = make_ref<TestObject>(123);
auto ref2 = ref1;
CHECK(ref1);
CHECK(ref2);
CHECK(ref1.get() == ref2.get());
CHECK(ref1->value == 123);
CHECK(ref2->value == 123);
}
SUBCASE("copy assignment works correctly") {
auto ref1 = make_ref<TestObject>(100);
auto ref2 = make_ref<TestObject>(200);
ref2 = ref1;
CHECK(ref1.get() == ref2.get());
CHECK(ref1->value == 100);
CHECK(ref2->value == 100);
}
SUBCASE("move construction transfers ownership") {
auto ref1 = make_ref<TestObject>(456);
auto *ptr = ref1.get();
auto ref2 = std::move(ref1);
CHECK(!ref1);
CHECK(ref2);
CHECK(ref2.get() == ptr);
CHECK(ref2->value == 456);
}
SUBCASE("move assignment transfers ownership") {
auto ref1 = make_ref<TestObject>(789);
auto ref2 = make_ref<TestObject>(999);
auto *ptr = ref1.get();
ref2 = std::move(ref1);
CHECK(!ref1);
CHECK(ref2);
CHECK(ref2.get() == ptr);
CHECK(ref2->value == 789);
}
SUBCASE("reset clears reference") {
auto ref = make_ref<TestObject>(111);
CHECK(ref);
ref.reset();
CHECK(!ref);
CHECK(ref.get() == nullptr);
}
}
TEST_CASE("WeakRef basic functionality") {
SUBCASE("construction from Ref") {
auto ref = make_ref<TestObject>(333);
WeakRef<TestObject> weak_ref = ref;
auto locked = weak_ref.lock();
CHECK(locked);
CHECK(locked.get() == ref.get());
CHECK(locked->value == 333);
}
SUBCASE("lock() returns empty when object destroyed") {
WeakRef<TestObject> weak_ref;
{
auto ref = make_ref<TestObject>(444);
weak_ref = ref;
}
// ref goes out of scope, object should be destroyed
auto locked = weak_ref.lock();
CHECK(!locked);
}
SUBCASE("copy and move semantics") {
auto ref = make_ref<TestObject>(666);
WeakRef<TestObject> weak1 = ref;
WeakRef<TestObject> weak2 = weak1; // copy
WeakRef<TestObject> weak3 = std::move(weak1); // move
auto locked2 = weak2.lock();
auto locked3 = weak3.lock();
CHECK(locked2);
CHECK(locked3);
CHECK(locked2->value == 666);
CHECK(locked3->value == 666);
}
}
TEST_CASE("Ref thread safety") {
SUBCASE("concurrent copying") {
const int num_threads = 4;
const int copies_per_thread = 100;
const int test_iterations = 1000;
for (int iter = 0; iter < test_iterations; ++iter) {
auto ref = make_ref<TestObject>(777);
std::vector<std::thread> threads;
std::latch start_latch{num_threads + 1};
for (int i = 0; i < num_threads; ++i) {
threads.emplace_back([&]() {
start_latch.arrive_and_wait();
for (int j = 0; j < copies_per_thread; ++j) {
auto copy = ref;
CHECK(copy);
CHECK(copy->value == 777);
}
});
}
start_latch.arrive_and_wait();
for (auto &t : threads) {
t.join();
}
CHECK(ref);
CHECK(ref->value == 777);
}
}
}
TEST_CASE("Control block cleanup race condition test") {
// This test specifically targets the race condition where both
// the last strong reference and last weak reference are destroyed
// simultaneously, potentially causing double-free of control block
const int test_iterations = 10000;
// Shared state for passing references between threads
Ref<TestObject> ptr1;
WeakRef<TestObject> ptr2;
auto setup = [&]() {
ptr1 = make_ref<TestObject>(0);
ptr2 = ptr1;
};
// Barrier for synchronization - 2 participants (main thread + worker thread)
std::barrier sync_barrier{2};
std::thread worker_thread([&]() {
for (int iter = 0; iter < test_iterations; ++iter) {
// Wait for main thread to create the references
sync_barrier.arrive_and_wait();
// Worker thread destroys the weak reference simultaneously with main
// thread
ptr2.reset();
// Wait for next iteration
sync_barrier.arrive_and_wait();
}
});
for (int iter = 0; iter < test_iterations; ++iter) {
// Create references
setup();
// Both threads are ready - synchronize for simultaneous destruction
sync_barrier.arrive_and_wait();
// Main thread destroys the strong reference at the same time
// as worker thread destroys the weak reference
ptr1.reset();
// Wait for both destructions to complete
sync_barrier.arrive_and_wait();
// Clean up for next iteration
ptr1.reset();
ptr2.reset();
}
worker_thread.join();
// If we reach here without segfault/double-free, the test passes
// The bug would manifest as a crash or memory corruption
}
TEST_CASE("WeakRef prevents circular references") {
SUBCASE("simple weak reference lifecycle") {
WeakRef<TestObject> weak_ref;
// Create object and weak reference
{
auto ref = make_ref<TestObject>(123);
weak_ref = ref;
// Should be able to lock while object exists
auto locked = weak_ref.lock();
CHECK(locked);
CHECK(locked->value == 123);
}
// Object destroyed when ref goes out of scope
// Should not be able to lock after object destroyed
auto locked = weak_ref.lock();
CHECK(!locked);
}
SUBCASE("parent-child cycle with WeakRef breaks cycle") {
auto parent = make_ref<Node>(1);
auto child = make_ref<Node>(2);
// Create potential cycle
parent->next = child; // Strong reference: parent → child
child->parent = parent; // WeakRef: child ⇝ parent (breaks cycle)
CHECK(parent->data == 1);
CHECK(child->data == 2);
CHECK(parent->next == child);
// Verify weak reference works while parent exists
CHECK(child->parent.lock() == parent);
// Clear the only strong reference to parent
parent.reset(); // This should destroy the parent object
// Now child's weak reference should fail to lock since parent is destroyed
CHECK(!child->parent.lock());
}
}
TEST_CASE("Polymorphic Ref conversions") {
SUBCASE("copy construction from derived to base") {
auto derived_ref = make_ref<Derived>(10, 20);
CHECK(derived_ref->get_value() == 30); // 10 + 20
// Convert Ref<Derived> to Ref<Base>
Ref<Base> base_ref = derived_ref;
CHECK(base_ref);
CHECK(base_ref->get_value() == 30); // Virtual dispatch works
CHECK(base_ref->base_value == 10);
// Both should point to same object
CHECK(base_ref.get() == derived_ref.get());
}
SUBCASE("copy assignment from derived to base") {
auto derived_ref = make_ref<Derived>(5, 15);
auto base_ref = make_ref<Base>(100);
// Before assignment
CHECK(base_ref->get_value() == 100);
// Assign derived to base
base_ref = derived_ref;
CHECK(base_ref->get_value() == 20); // 5 + 15
CHECK(base_ref.get() == derived_ref.get());
}
SUBCASE("move construction from derived to base") {
auto derived_ref = make_ref<Derived>(7, 3);
Base *original_ptr = derived_ref.get();
// Move construct base from derived
Ref<Base> base_ref = std::move(derived_ref);
CHECK(base_ref);
CHECK(base_ref->get_value() == 10); // 7 + 3
CHECK(base_ref.get() == original_ptr);
CHECK(!derived_ref); // Original should be empty after move
}
SUBCASE("move assignment from derived to base") {
auto derived_ref = make_ref<Derived>(8, 12);
auto base_ref = make_ref<Base>(200);
Base *derived_ptr = derived_ref.get();
// Move assign
base_ref = std::move(derived_ref);
CHECK(base_ref);
CHECK(base_ref->get_value() == 20); // 8 + 12
CHECK(base_ref.get() == derived_ptr);
CHECK(!derived_ref); // Should be empty after move
}
SUBCASE("multiple inheritance levels") {
auto another_derived = make_ref<AnotherDerived>(6, 4);
CHECK(another_derived->get_value() == 24); // 6 * 4
// Convert to base
Ref<Base> base_ref = another_derived;
CHECK(base_ref->get_value() == 24); // Virtual dispatch
CHECK(base_ref.get() == another_derived.get());
}
}
TEST_CASE("Polymorphic WeakRef conversions") {
SUBCASE("WeakRef copy construction from derived to base") {
auto derived_ref = make_ref<Derived>(3, 7);
// Create WeakRef<Derived>
WeakRef<Derived> weak_derived = derived_ref;
// Convert to WeakRef<Base>
WeakRef<Base> weak_base = weak_derived;
// Both should lock to same object
auto locked_derived = weak_derived.lock();
auto locked_base = weak_base.lock();
CHECK(locked_derived);
CHECK(locked_base);
CHECK(locked_derived.get() == locked_base.get());
CHECK(locked_base->get_value() == 10); // 3 + 7
}
SUBCASE("WeakRef copy assignment from derived to base") {
auto derived_ref = make_ref<Derived>(4, 6);
auto base_ref = make_ref<Base>(999);
WeakRef<Derived> weak_derived = derived_ref;
WeakRef<Base> weak_base = base_ref;
// Assign derived weak ref to base weak ref
weak_base = weak_derived;
auto locked = weak_base.lock();
CHECK(locked);
CHECK(locked->get_value() == 10); // 4 + 6
CHECK(locked.get() == derived_ref.get());
}
SUBCASE("WeakRef from Ref<Derived> to WeakRef<Base>") {
auto derived_ref = make_ref<Derived>(2, 8);
// Create WeakRef<Base> directly from Ref<Derived>
WeakRef<Base> weak_base = derived_ref;
auto locked = weak_base.lock();
CHECK(locked);
CHECK(locked->get_value() == 10); // 2 + 8
CHECK(locked.get() == derived_ref.get());
}
SUBCASE("WeakRef move operations") {
auto derived_ref = make_ref<Derived>(1, 9);
WeakRef<Derived> weak_derived = derived_ref;
// Move construct
WeakRef<Base> weak_base = std::move(weak_derived);
// Original should be empty, new should work
CHECK(!weak_derived.lock());
auto locked = weak_base.lock();
CHECK(locked);
CHECK(locked->get_value() == 10); // 1 + 9
}
}
TEST_CASE("Polymorphic edge cases") {
SUBCASE("empty Ref conversions") {
Ref<Derived> empty_derived;
CHECK(!empty_derived);
// Convert empty derived to base
Ref<Base> empty_base = empty_derived;
CHECK(!empty_base);
// Move empty derived to base
Ref<Base> moved_base = std::move(empty_derived);
CHECK(!moved_base);
}
SUBCASE("empty WeakRef conversions") {
WeakRef<Derived> empty_weak_derived;
CHECK(!empty_weak_derived.lock());
// Convert empty weak derived to weak base
WeakRef<Base> empty_weak_base = empty_weak_derived;
CHECK(!empty_weak_base.lock());
}
SUBCASE("mixed Ref and WeakRef conversions") {
auto derived_ref = make_ref<Derived>(5, 5);
// Ref<Derived> → WeakRef<Base>
WeakRef<Base> weak_base_from_ref = derived_ref;
// WeakRef<Base> → Ref<Base> via lock
auto base_ref_from_weak = weak_base_from_ref.lock();
CHECK(base_ref_from_weak);
CHECK(base_ref_from_weak->get_value() == 10); // 5 + 5
CHECK(base_ref_from_weak.get() == derived_ref.get());
}
}
// Should be run with asan or valgrind
TEST_CASE("Self-referencing WeakRef pattern") {
struct AmIAlive {
volatile int x;
~AmIAlive() { x = 0; }
};
struct SelfReferencing {
AmIAlive am;
WeakRef<SelfReferencing> self_;
};
auto x = make_ref<SelfReferencing>();
x->self_ = x;
}

View File

@@ -66,6 +66,8 @@ def check_snake_case_violations(filepath, check_new_only=True):
exclusions = [
# C++ standard library and common libraries
r"\b(std::|weaseljson|simdutf|doctest)",
# Nanobench library API (external camelCase API)
r"\b(nanobench::|doNotOptimizeAway|minEpochIterations)\b",
# Template parameters and concepts
r"\b[A-Z][a-zA-Z0-9_]*\b",
# Class/struct names (PascalCase is correct)

View File

@@ -248,7 +248,7 @@ struct Connection {
}
// Match server's connection state management
bool hasMessages() const { return !request.empty(); }
bool has_messages() const { return !request.empty(); }
bool error = false;
~Connection() {
@@ -698,7 +698,7 @@ int main(int argc, char *argv[]) {
// Transfer back to epoll instance. This thread or another thread
// will wake when fd is ready
if (conn->hasMessages()) {
if (conn->has_messages()) {
events[i].events = EPOLLOUT | EPOLLONESHOT;
} else {
events[i].events = EPOLLIN | EPOLLONESHOT;
@@ -748,7 +748,7 @@ int main(int argc, char *argv[]) {
// Try to write once in the connect thread before handing off to network
// threads
assert(conn->hasMessages());
assert(conn->has_messages());
bool writeFinished = conn->writeBytes();
if (conn->error) {
continue; // Connection failed, destructor will clean up
@@ -766,7 +766,7 @@ int main(int argc, char *argv[]) {
event.events = EPOLLIN | EPOLLONESHOT;
} else {
event.events =
(conn->hasMessages() ? EPOLLOUT : EPOLLIN) | EPOLLONESHOT;
(conn->has_messages() ? EPOLLOUT : EPOLLIN) | EPOLLONESHOT;
}
// Add to a round-robin selected epoll instance to distribute load