diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5f5450b..fab3be7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -138,7 +138,7 @@ target_link_libraries(nanobench_impl PUBLIC nanobench)
 
 # Define all source files in one place
 set(WEASELDB_SOURCES
-    src/arena_allocator.cpp
+    src/arena.cpp
     src/cpu_work.cpp
     src/format.cpp
     src/metric.cpp
@@ -184,9 +184,9 @@ else()
   target_compile_options(weaseldb_sources_debug PRIVATE -UNDEBUG)
 endif()
 
-add_executable(test_arena_allocator tests/test_arena_allocator.cpp)
-target_link_libraries(test_arena_allocator doctest_impl weaseldb_sources_debug)
-target_compile_options(test_arena_allocator PRIVATE -UNDEBUG)
+add_executable(test_arena tests/test_arena.cpp)
+target_link_libraries(test_arena doctest_impl weaseldb_sources_debug)
+target_compile_options(test_arena PRIVATE -UNDEBUG)
 
 add_executable(
   test_commit_request
@@ -215,8 +215,8 @@ target_compile_options(test_metric PRIVATE -UNDEBUG)
 # Register with CTest
 add_test(NAME metric_tests COMMAND test_metric)
 
-add_executable(bench_arena_allocator benchmarks/bench_arena_allocator.cpp)
-target_link_libraries(bench_arena_allocator nanobench_impl weaseldb_sources)
+add_executable(bench_arena benchmarks/bench_arena.cpp)
+target_link_libraries(bench_arena nanobench_impl weaseldb_sources)
 
 add_executable(bench_cpu_work benchmarks/bench_cpu_work.cpp src/cpu_work.cpp)
 target_link_libraries(bench_cpu_work nanobench_impl)
@@ -253,12 +253,12 @@ target_link_libraries(debug_arena weaseldb_sources)
 add_executable(load_tester tools/load_tester.cpp)
 target_link_libraries(load_tester Threads::Threads llhttp_static perfetto)
 
-add_test(NAME arena_allocator_tests COMMAND test_arena_allocator)
+add_test(NAME arena_tests COMMAND test_arena)
 add_test(NAME commit_request_tests COMMAND test_commit_request)
 add_test(NAME http_handler_tests COMMAND test_http_handler)
 add_test(NAME server_connection_return_tests COMMAND test_server_connection_return)
 
-add_test(NAME arena_allocator_benchmarks COMMAND bench_arena_allocator)
+add_test(NAME arena_benchmarks COMMAND bench_arena)
 add_test(NAME commit_request_benchmarks COMMAND bench_commit_request)
 add_test(NAME parser_comparison_benchmarks COMMAND bench_parser_comparison)
 add_test(NAME thread_pipeline_benchmarks COMMAND bench_thread_pipeline)
diff --git a/benchmarks/bench_arena_allocator.cpp b/benchmarks/bench_arena.cpp
similarity index 90%
rename from benchmarks/bench_arena_allocator.cpp
rename to benchmarks/bench_arena.cpp
index 6bdc5ef..99a8fa8 100644
--- a/benchmarks/bench_arena_allocator.cpp
+++ b/benchmarks/bench_arena.cpp
@@ -1,4 +1,4 @@
-#include "arena_allocator.hpp"
+#include "arena.hpp"
 
 #include
 #include
@@ -14,8 +14,8 @@ int main() {
 
   {
     // Arena allocator benchmark
-    ArenaAllocator arena;
-    bench.run("ArenaAllocator", [&] {
+    Arena arena;
+    bench.run("Arena", [&] {
       void *ptr = arena.allocate_raw(alloc_size);
       ankerl::nanobench::doNotOptimizeAway(ptr);
     });
diff --git a/benchmarks/bench_format_comparison.cpp b/benchmarks/bench_format_comparison.cpp
index 94f152c..1c09a5e 100644
--- a/benchmarks/bench_format_comparison.cpp
+++ b/benchmarks/bench_format_comparison.cpp
@@ -1,4 +1,4 @@
-#include "arena_allocator.hpp"
+#include "arena.hpp"
 #include "format.hpp"
 #include
 #include
@@ -26,7 +26,7 @@ void benchmark_simple_concatenation() {
   ankerl::nanobench::Bench bench;
   bench.title("Simple Concatenation").unit("op").warmup(100);
 
-  ArenaAllocator arena(64);
+  Arena arena(64);
   // Arena-based static_format
   bench.run("static_format", [&] {
     auto result = static_format(arena, "Hello ", "World",
"!"); @@ -65,7 +65,7 @@ void benchmark_mixed_types() { ankerl::nanobench::Bench bench; bench.title("Mixed Types").unit("op").warmup(100); - ArenaAllocator arena(128); + Arena arena(128); // Arena-based static_format bench.run("static_format", [&] { auto result = @@ -106,7 +106,7 @@ void benchmark_complex_formatting() { ankerl::nanobench::Bench bench; bench.title("Complex Formatting").unit("op").warmup(100); - ArenaAllocator arena(128); + Arena arena(128); // Arena-based format (static_format doesn't support printf specifiers) bench.run("format", [&] { auto result = format(arena, "%-10s %5d %8.2f", TEST_STRING.c_str(), @@ -147,7 +147,7 @@ void benchmark_error_messages() { constexpr int line_number = 123; const std::string error_msg = "File not found"; - ArenaAllocator arena(128); + Arena arena(128); // Arena-based static_format (using string literals only) bench.run("static_format", [&] { auto result = static_format(arena, "Error ", error_code, ": ", @@ -188,7 +188,7 @@ void benchmark_double_formatting() { std::cout << "\n=== Simple Double Formatting ===\n"; // Validate that all formatters produce identical output - ArenaAllocator arena(128); + Arena arena(128); auto static_result = static_format(arena, TEST_DOUBLE); auto format_result = format(arena, "%.17g", TEST_DOUBLE); diff --git a/benchmarks/bench_metric.cpp b/benchmarks/bench_metric.cpp index e802d6a..dd900f4 100644 --- a/benchmarks/bench_metric.cpp +++ b/benchmarks/bench_metric.cpp @@ -1,6 +1,6 @@ #include -#include "arena_allocator.hpp" +#include "arena.hpp" #include "metric.hpp" #include @@ -62,7 +62,7 @@ struct ContentionEnvironment { render_latch = std::make_unique(2); background_threads.emplace_back([this]() { - ArenaAllocator arena; + Arena arena; render_latch->arrive_and_wait(); // Render thread signals it's ready @@ -201,7 +201,7 @@ int main() { }); } - ArenaAllocator arena; + Arena arena; std::string bench_name = "render() - " + std::to_string(scale) + " metrics each type"; diff --git a/benchmarks/bench_parser_comparison.cpp b/benchmarks/bench_parser_comparison.cpp index 7f103a4..e371df7 100644 --- a/benchmarks/bench_parser_comparison.cpp +++ b/benchmarks/bench_parser_comparison.cpp @@ -14,9 +14,9 @@ using namespace weaseldb::test_data; // Arena-based allocator adapter for RapidJSON -class RapidJsonArenaAllocator { +class RapidJsonArenaAdapter { public: - explicit RapidJsonArenaAllocator(ArenaAllocator *arena) : arena_(arena) {} + explicit RapidJsonArenaAdapter(Arena *arena) : arena_(arena) {} static const bool kNeedFree = false; @@ -37,7 +37,7 @@ public: } private: - ArenaAllocator *arena_; + Arena *arena_; }; // Arena-based RapidJSON SAX handler for commit request parsing @@ -56,7 +56,7 @@ public: std::string_view key, value, begin, end; }; - ArenaAllocator arena; + Arena arena; bool valid = true; std::string_view request_id, leader_id; uint64_t read_version = 0; diff --git a/design.md b/design.md index cfabf27..60c8068 100644 --- a/design.md +++ b/design.md @@ -48,7 +48,7 @@ ninja test # or ctest ``` **Individual targets:** -- `./test_arena_allocator` - Arena allocator unit tests +- `./test_arena` - Arena allocator unit tests - `./test_commit_request` - JSON parsing and validation tests - `./test_http_handler` - HTTP protocol handling tests - `./test_metric` - Metrics system tests @@ -56,7 +56,7 @@ ninja test # or ctest - `./test_server_connection_return` - Connection lifecycle tests **Benchmarking:** -- `./bench_arena_allocator` - Memory allocation performance +- `./bench_arena` - Memory allocation performance - 
`./bench_commit_request` - JSON parsing performance - `./bench_parser_comparison` - Compare vs nlohmann::json and RapidJSON - `./bench_metric` - Metrics system performance @@ -90,7 +90,7 @@ ninja test # or ctest ### Core Components -#### **Arena Allocator** (`src/arena_allocator.hpp`) +#### **Arena Allocator** (`src/arena.hpp`) Ultra-fast memory allocator optimized for request/response patterns: @@ -290,7 +290,7 @@ See [style.md](style.md) for comprehensive C++ coding standards and conventions. - **Server Creation**: Always use `Server::create()` factory method - direct construction is impossible - **Connection Creation**: Only the Server can create connections - no public constructor or factory method - **Connection Ownership**: Use unique_ptr semantics for safe ownership transfer between components -- **Arena Allocator Pattern**: Always use `ArenaAllocator` for temporary allocations within request processing +- **Arena Allocator Pattern**: Always use `Arena` for temporary allocations within request processing - **String View Usage**: Prefer `std::string_view` over `std::string` when pointing to arena-allocated memory - **Ownership Transfer**: Use `Server::release_back_to_server()` for returning connections to server from handlers - **JSON Token Lookup**: Use the gperf-generated perfect hash table in `json_tokens.hpp` for O(1) key recognition @@ -375,7 +375,7 @@ class HttpHandler : public ConnectionHandler { public: void on_data_arrived(std::string_view data, std::unique_ptr& conn_ptr) override { // Parse HTTP request using connection's arena - ArenaAllocator& arena = conn_ptr->get_arena(); + Arena& arena = conn_ptr->get_arena(); // Generate response conn_ptr->append_message("HTTP/1.1 200 OK\r\n\r\nHello World"); @@ -468,7 +468,7 @@ public: #### Arena-Based String Handling ```cpp // Preferred: String view with arena allocation to minimize copying -std::string_view process_json_key(const char* data, ArenaAllocator& arena); +std::string_view process_json_key(const char* data, Arena& arena); // Avoid: Unnecessary string copies std::string process_json_key(const char* data); @@ -513,13 +513,13 @@ ParseResult parse_commit_request(const char* json, CommitRequest& out); ### Build Targets **Test Executables:** -- `test_arena_allocator` - Arena allocator functionality tests +- `test_arena` - Arena allocator functionality tests - `test_commit_request` - JSON parsing and validation tests - `test_metric` - Metrics system functionality tests - Main server executable (compiled from `src/main.cpp`) **Benchmark Executables:** -- `bench_arena_allocator` - Arena allocator performance benchmarks +- `bench_arena` - Arena allocator performance benchmarks - `bench_commit_request` - JSON parsing performance benchmarks - `bench_parser_comparison` - Comparison benchmarks vs nlohmann::json and RapidJSON - `bench_metric` - Metrics system performance benchmarks diff --git a/src/arena_allocator.cpp b/src/arena.cpp similarity index 91% rename from src/arena_allocator.cpp rename to src/arena.cpp index e6863be..b6d2461 100644 --- a/src/arena_allocator.cpp +++ b/src/arena.cpp @@ -1,10 +1,10 @@ -#include "arena_allocator.hpp" +#include "arena.hpp" #include #include #include #include -ArenaAllocator::~ArenaAllocator() { +Arena::~Arena() { while (current_block_) { Block *prev = current_block_->prev; std::free(current_block_); @@ -12,13 +12,13 @@ ArenaAllocator::~ArenaAllocator() { } } -ArenaAllocator::ArenaAllocator(ArenaAllocator &&other) noexcept +Arena::Arena(Arena &&other) noexcept : 
initial_block_size_(other.initial_block_size_), current_block_(other.current_block_) { other.current_block_ = nullptr; } -ArenaAllocator &ArenaAllocator::operator=(ArenaAllocator &&other) noexcept { +Arena &Arena::operator=(Arena &&other) noexcept { if (this != &other) { while (current_block_) { Block *prev = current_block_->prev; @@ -34,7 +34,7 @@ ArenaAllocator &ArenaAllocator::operator=(ArenaAllocator &&other) noexcept { return *this; } -void ArenaAllocator::reset() { +void Arena::reset() { if (!current_block_) { return; } @@ -63,8 +63,8 @@ void ArenaAllocator::reset() { current_block_->offset = 0; } -void *ArenaAllocator::realloc_raw(void *ptr, uint32_t old_size, - uint32_t new_size, uint32_t alignment) { +void *Arena::realloc_raw(void *ptr, uint32_t old_size, uint32_t new_size, + uint32_t alignment) { if (ptr == nullptr) { return allocate_raw(new_size, alignment); } @@ -125,8 +125,8 @@ void *ArenaAllocator::realloc_raw(void *ptr, uint32_t old_size, return new_ptr; } -void ArenaAllocator::debug_dump(std::ostream &out, bool show_memory_map, - bool show_content, size_t content_limit) const { +void Arena::debug_dump(std::ostream &out, bool show_memory_map, + bool show_content, size_t content_limit) const { out << "=== Arena Debug Dump ===" << std::endl; if (!current_block_) { @@ -242,20 +242,20 @@ void ArenaAllocator::debug_dump(std::ostream &out, bool show_memory_map, } } -void ArenaAllocator::add_block(size_t size) { +void Arena::add_block(size_t size) { Block *new_block = Block::create(size, current_block_); current_block_ = new_block; } -size_t ArenaAllocator::calculate_next_block_size(size_t required_size) const { +size_t Arena::calculate_next_block_size(size_t required_size) const { size_t doubled_size = (current_block_ ? current_block_->size : 0) * 2; doubled_size = std::min(doubled_size, std::numeric_limits::max()); return std::max(required_size, doubled_size); } -void ArenaAllocator::dump_memory_contents(std::ostream &out, const char *data, - size_t size) { +void Arena::dump_memory_contents(std::ostream &out, const char *data, + size_t size) { const int bytes_per_line = 16; for (int64_t offset = 0; offset < static_cast(size); diff --git a/src/arena_allocator.hpp b/src/arena.hpp similarity index 93% rename from src/arena_allocator.hpp rename to src/arena.hpp index d428329..3c7096f 100644 --- a/src/arena_allocator.hpp +++ b/src/arena.hpp @@ -17,7 +17,7 @@ /** * @brief A high-performance arena allocator for bulk allocations. * - * ArenaAllocator provides extremely fast memory allocation (~1ns per + * Arena provides extremely fast memory allocation (~1ns per * allocation) by allocating large blocks and serving allocations from them * sequentially. It's designed for scenarios where many small objects need to be * allocated and can all be deallocated together. @@ -39,7 +39,7 @@ * * ## Usage Examples: * ```cpp - * ArenaAllocator arena(1024); + * Arena arena(1024); * void* ptr = arena.allocate_raw(100); * int* num = arena.construct(42); * arena.reset(); // Reuse arena memory @@ -52,13 +52,13 @@ * - Move semantics transfer ownership of all blocks * * ## Thread Safety: - * ArenaAllocator is **not thread-safe** - concurrent access from multiple + * Arena is **not thread-safe** - concurrent access from multiple * threads requires external synchronization. 
However, this design is * intentional for performance reasons and the WeaselDB architecture ensures * thread safety through ownership patterns: * * ### Safe Usage Patterns in WeaselDB: - * - **Per-Connection Instances**: Each Connection owns its own ArenaAllocator + * - **Per-Connection Instances**: Each Connection owns its own Arena * instance, accessed only by the thread that currently owns the connection * - **Single Owner Principle**: Connection ownership transfers atomically * between threads using unique_ptr, ensuring only one thread accesses the arena @@ -81,10 +81,10 @@ * - **No Shared State**: Each arena is completely isolated - no shared data * between different arena instances * - * @warning Do not share ArenaAllocator instances between threads. Use separate + * @warning Do not share Arena instances between threads. Use separate * instances per thread or per logical unit of work. */ -struct ArenaAllocator { +struct Arena { private: /** * @brief Internal block structure for the intrusive linked list. @@ -117,19 +117,17 @@ private: */ static Block *create(size_t size, Block *prev) { if (size > std::numeric_limits::max()) { - std::fprintf( - stderr, - "ArenaAllocator: Block size %zu exceeds maximum uint32_t value\n", - size); + std::fprintf(stderr, + "Arena: Block size %zu exceeds maximum uint32_t value\n", + size); std::abort(); } void *memory = std::aligned_alloc( alignof(Block), align_up(sizeof(Block) + size, alignof(Block))); if (!memory) { - std::fprintf( - stderr, - "ArenaAllocator: Failed to allocate memory block of size %zu\n", - size); + std::fprintf(stderr, + "Arena: Failed to allocate memory block of size %zu\n", + size); std::abort(); } size_t total_size = size + (prev ? prev->total_size : 0); @@ -142,7 +140,7 @@ private: public: /** - * @brief Construct an ArenaAllocator with the specified initial block size. + * @brief Construct an Arena with the specified initial block size. * * No memory is allocated until the first allocation request (lazy * initialization). The initial block size is used for the first block and as @@ -150,7 +148,7 @@ public: * * @param initial_size Size in bytes for the first block (default: 1024) */ - explicit ArenaAllocator(size_t initial_size = 1024) + explicit Arena(size_t initial_size = 1024) : initial_block_size_(initial_size), current_block_(nullptr) {} /** @@ -159,18 +157,18 @@ public: * Traverses the intrusive linked list backwards from current_block_, * freeing each block. This ensures no memory leaks. */ - ~ArenaAllocator(); + ~Arena(); /// Copy construction is not allowed (would be expensive and error-prone) - ArenaAllocator(const ArenaAllocator &) = delete; + Arena(const Arena &) = delete; /// Copy assignment is not allowed (would be expensive and error-prone) - ArenaAllocator &operator=(const ArenaAllocator &) = delete; + Arena &operator=(const Arena &) = delete; /** * @brief Move constructor - transfers ownership of all blocks. - * @param other The ArenaAllocator to move from (will be left empty) + * @param other The Arena to move from (will be left empty) */ - ArenaAllocator(ArenaAllocator &&other) noexcept; + Arena(Arena &&other) noexcept; /** * @brief Move assignment operator - transfers ownership of all blocks. @@ -178,10 +176,10 @@ public: * Frees any existing blocks in this allocator before taking ownership * of blocks from the other allocator. 
* - * @param other The ArenaAllocator to move from (will be left empty) + * @param other The Arena to move from (will be left empty) * @return Reference to this allocator */ - ArenaAllocator &operator=(ArenaAllocator &&other) noexcept; + Arena &operator=(Arena &&other) noexcept; /** * @brief Allocate raw memory with the specified size and alignment. @@ -293,7 +291,7 @@ public: T *realloc(T *ptr, uint32_t old_size, uint32_t new_size) { if (size_t(new_size) * sizeof(T) > std::numeric_limits::max()) { std::fprintf(stderr, - "ArenaAllocator: Reallocation size overflow for type %s " + "Arena: Reallocation size overflow for type %s " "(new_size=%u, sizeof(T)=%zu)\n", typeid(T).name(), new_size, sizeof(T)); std::abort(); @@ -306,7 +304,7 @@ public: * @brief Smart pointer for arena-allocated objects with non-trivial * destructors. * - * ArenaAllocator::Ptr calls the destructor but does not free memory (assumes + * Arena::Ptr calls the destructor but does not free memory (assumes * arena allocation). This provides RAII semantics for objects that need * cleanup without the overhead of individual memory deallocation. * @@ -363,13 +361,13 @@ public: * This method returns different types based on whether T is trivially * destructible: * - For trivially destructible types: returns T* (raw pointer) - * - For non-trivially destructible types: returns ArenaAllocator::Ptr + * - For non-trivially destructible types: returns Arena::Ptr * (smart pointer that calls destructor) * * @tparam T The type of object to construct * @tparam Args Types of constructor arguments * @param args Arguments to forward to T's constructor - * @return T* for trivially destructible types, ArenaAllocator::Ptr + * @return T* for trivially destructible types, Arena::Ptr * otherwise * @note Prints error to stderr and calls std::abort() if memory allocation * fails @@ -414,7 +412,7 @@ public: template T *allocate(uint32_t size) { static_assert( std::is_trivially_destructible_v, - "ArenaAllocator::allocate requires trivially destructible types. " + "Arena::allocate requires trivially destructible types. " "Objects allocated in the arena will not have their destructors " "called."); if (size == 0) { @@ -422,7 +420,7 @@ public: } if (size_t(size) * sizeof(T) > std::numeric_limits::max()) { std::fprintf(stderr, - "ArenaAllocator: Allocation size overflow for type %s " + "Arena: Allocation size overflow for type %s " "(size=%u, sizeof(T)=%zu)\n", typeid(T).name(), size, sizeof(T)); std::abort(); @@ -615,7 +613,7 @@ private: }; /** - * @brief STL-compatible allocator that uses ArenaAllocator for memory + * @brief STL-compatible allocator that uses Arena for memory * management. 
* @tparam T The type of objects to allocate */ @@ -633,7 +631,7 @@ public: using other = ArenaStlAllocator; }; - explicit ArenaStlAllocator(ArenaAllocator *arena) noexcept : arena_(arena) {} + explicit ArenaStlAllocator(Arena *arena) noexcept : arena_(arena) {} template ArenaStlAllocator(const ArenaStlAllocator &other) noexcept @@ -659,7 +657,7 @@ public: return arena_ != other.arena_; } - ArenaAllocator *arena_; + Arena *arena_; template friend class ArenaStlAllocator; }; @@ -669,7 +667,7 @@ public: /// arena-allocated Uses arena's realloc() for efficient growth without copying /// when possible template struct ArenaVector { - explicit ArenaVector(ArenaAllocator *arena) + explicit ArenaVector(Arena *arena) : arena_(arena), data_(nullptr), size_(0), capacity_(0) {} void push_back(const T &item) { @@ -713,7 +711,7 @@ private: capacity_ = new_capacity; } - ArenaAllocator *arena_; + Arena *arena_; T *data_; size_t size_; size_t capacity_; diff --git a/src/commit_request.hpp b/src/commit_request.hpp index c5ac4c0..35a93c0 100644 --- a/src/commit_request.hpp +++ b/src/commit_request.hpp @@ -5,7 +5,7 @@ #include #include -#include "arena_allocator.hpp" +#include "arena.hpp" /** * @brief Represents a precondition for optimistic concurrency control. @@ -63,7 +63,7 @@ struct Operation { */ struct CommitRequest { private: - ArenaAllocator arena_; + Arena arena_; std::optional request_id_; std::string_view leader_id_; int64_t read_version_ = 0; @@ -155,7 +155,7 @@ public: * * @return Reference to the arena allocator */ - const ArenaAllocator &arena() const { return arena_; } + const Arena &arena() const { return arena_; } /** * @brief Get access to the underlying arena allocator for allocation. @@ -166,7 +166,7 @@ public: * * @return Reference to the arena allocator */ - ArenaAllocator &arena() { return arena_; } + Arena &arena() { return arena_; } /** * @brief Reset the commit request for reuse. diff --git a/src/connection.hpp b/src/connection.hpp index 8e32e0e..b8a18c8 100644 --- a/src/connection.hpp +++ b/src/connection.hpp @@ -8,7 +8,7 @@ #include #include -#include "arena_allocator.hpp" +#include "arena.hpp" #include "connection_handler.hpp" #ifndef __has_feature @@ -28,7 +28,7 @@ * - RAII cleanup happens if I/O thread doesn't transfer back * * Arena allocator thread safety: - * Each Connection contains its own ArenaAllocator instance that is accessed + * Each Connection contains its own Arena instance that is accessed * exclusively by the thread that currently owns the connection. This ensures * thread safety without requiring locks: * - Arena is used by the owning thread for I/O buffers, request parsing, and @@ -117,7 +117,7 @@ struct Connection { /** * @brief Get access to the connection's arena allocator. * - * Returns a reference to this connection's private ArenaAllocator instance, + * Returns a reference to this connection's private Arena instance, * which should be used for all temporary allocations during request * processing. The arena provides extremely fast allocation (~1ns) and * automatic cleanup when the connection is destroyed or reset. 
@@ -135,7 +135,7 @@ struct Connection { * * Best practices: * ```cpp - * ArenaAllocator& arena = conn->get_arena(); + * Arena& arena = conn->get_arena(); * * // Allocate temporary parsing buffers * char* buffer = arena.allocate(1024); @@ -147,7 +147,7 @@ struct Connection { * std::vector> tokens{&arena}; * ``` */ - ArenaAllocator &get_arena() { return arena_; } + Arena &get_arena() { return arena_; } /** * @brief Get the unique identifier for this connection. @@ -345,7 +345,7 @@ private: const int64_t id_; const size_t epoll_index_; // Index of the epoll instance this connection uses struct sockaddr_storage addr_; // sockaddr_storage handles IPv4/IPv6 - ArenaAllocator arena_; + Arena arena_; ConnectionHandler *handler_; std::weak_ptr server_; // Weak reference to server for safe cleanup diff --git a/src/format.cpp b/src/format.cpp index d3ea57c..c867017 100644 --- a/src/format.cpp +++ b/src/format.cpp @@ -952,7 +952,7 @@ void DoubleTerm::write(char *&buf) const { buf = to_chars(buf, nullptr, s); } } // namespace detail -std::string_view format(ArenaAllocator &arena, const char *fmt, ...) { +std::string_view format(Arena &arena, const char *fmt, ...) { va_list args; // Try to format directly into available arena space (single-pass diff --git a/src/format.hpp b/src/format.hpp index 5a50c63..ce89959 100644 --- a/src/format.hpp +++ b/src/format.hpp @@ -7,7 +7,7 @@ #include #include -#include "arena_allocator.hpp" +#include "arena.hpp" /** * @brief Runtime printf-style formatting with arena allocation optimization. @@ -47,7 +47,7 @@ * * ## Usage Examples: * ```cpp - * ArenaAllocator arena(1024); + * Arena arena(1024); * * // Basic formatting * auto msg = format(arena, "Hello %s!", "World"); @@ -83,7 +83,7 @@ * const char* * * ## Optimization Details: - * The function uses `ArenaAllocator::allocate_remaining_space()` to claim all + * The function uses `Arena::allocate_remaining_space()` to claim all * available arena space and attempt formatting. If successful, it shrinks the * allocation to the actual size used. If formatting fails (doesn't fit), it * falls back to the traditional two-pass approach: measure size, allocate @@ -92,7 +92,7 @@ * This strategy optimizes for the common case where available arena space is * sufficient, while maintaining correctness for all cases. */ -std::string_view format(ArenaAllocator &arena, const char *fmt, ...) +std::string_view format(Arena &arena, const char *fmt, ...) 
__attribute__((format(printf, 2, 3))); namespace detail { @@ -232,7 +232,7 @@ inline constexpr DoubleTerm term(double s) { return DoubleTerm(s); } * * ## Usage Examples: * ```cpp - * ArenaAllocator arena(1024); + * Arena arena(1024); * * // String concatenation * auto result1 = static_format(arena, "Hello ", "World", "!"); @@ -275,7 +275,7 @@ inline constexpr DoubleTerm term(double s) { return DoubleTerm(s); } * builds */ template -std::string_view static_format(ArenaAllocator &arena, Ts &&...ts) { +std::string_view static_format(Arena &arena, Ts &&...ts) { constexpr int upper_bound = (decltype(detail::term(ts))::kMaxLength + ...); char *result = arena.allocate(upper_bound); char *buf = result; diff --git a/src/http_handler.cpp b/src/http_handler.cpp index c5b0574..d348b3d 100644 --- a/src/http_handler.cpp +++ b/src/http_handler.cpp @@ -6,7 +6,7 @@ #include #include "api_url_parser.hpp" -#include "arena_allocator.hpp" +#include "arena.hpp" #include "cpu_work.hpp" #include "format.hpp" #include "json_commit_request_parser.hpp" @@ -37,7 +37,7 @@ auto banned_request_ids_memory_gauge = .create({}); // HttpConnectionState implementation -HttpConnectionState::HttpConnectionState(ArenaAllocator &arena) +HttpConnectionState::HttpConnectionState(Arena &arena) : arena(arena), current_header_field_buf(ArenaStlAllocator(&arena)), current_header_value_buf(ArenaStlAllocator(&arena)) { llhttp_settings_init(&settings); @@ -59,7 +59,7 @@ HttpConnectionState::HttpConnectionState(ArenaAllocator &arena) // HttpHandler implementation void HttpHandler::on_connection_established(Connection &conn) { // Allocate HTTP state in connection's arena - ArenaAllocator &arena = conn.get_arena(); + Arena &arena = conn.get_arena(); void *mem = arena.allocate_raw(sizeof(HttpConnectionState), alignof(HttpConnectionState)); auto *state = new (mem) HttpConnectionState(arena); @@ -70,7 +70,7 @@ void HttpHandler::on_connection_closed(Connection &conn) { // Arena cleanup happens automatically when connection is destroyed auto *state = static_cast(conn.user_data); if (state) { - // ArenaAllocator::Ptr automatically calls destructors + // Arena::Ptr automatically calls destructors state->~HttpConnectionState(); } conn.user_data = nullptr; @@ -171,7 +171,7 @@ void HttpHandler::on_data_arrived(std::string_view data, // If message is complete, route and handle the request if (state->message_complete) { // Copy URL to arena for in-place decoding - ArenaAllocator &arena = conn_ptr->get_arena(); + Arena &arena = conn_ptr->get_arena(); char *url_buffer = arena.allocate(state->url.size()); std::memcpy(url_buffer, state->url.data(), state->url.size()); @@ -244,7 +244,7 @@ void HttpHandler::handle_post_commit(Connection &conn, const char *error = state.commit_parser ? state.commit_parser->get_parse_error() : "No parser initialized"; - ArenaAllocator &arena = conn.get_arena(); + Arena &arena = conn.get_arena(); std::string_view error_msg = format(arena, "Parse failed: %s", error ? 
error : "Unknown error"); send_error_response(conn, 400, error_msg, state.connection_close); @@ -361,7 +361,7 @@ void HttpHandler::handle_delete_retention(Connection &conn, void HttpHandler::handle_get_metrics(Connection &conn, const HttpConnectionState &state) { metrics_counter.inc(); - ArenaAllocator &arena = conn.get_arena(); + Arena &arena = conn.get_arena(); auto metrics_span = metric::render(arena); // Calculate total size for the response body @@ -418,7 +418,7 @@ void HttpHandler::handle_not_found(Connection &conn, void HttpHandler::send_response(Connection &conn, int status_code, std::string_view content_type, std::string_view body, bool close_connection) { - ArenaAllocator &arena = conn.get_arena(); + Arena &arena = conn.get_arena(); auto *state = static_cast(conn.user_data); // Status text @@ -472,7 +472,7 @@ void HttpHandler::send_json_response(Connection &conn, int status_code, void HttpHandler::send_error_response(Connection &conn, int status_code, std::string_view message, bool close_connection) { - ArenaAllocator &arena = conn.get_arena(); + Arena &arena = conn.get_arena(); std::string_view json = format(arena, R"({"error":"%.*s"})", static_cast(message.size()), @@ -819,7 +819,7 @@ bool HttpHandler::process_persist_batch(BatchType &batch) { perfetto::Flow::Global(state->http_request_id)); const CommitRequest &commit_request = *state->commit_request; - ArenaAllocator &arena = commit_entry.connection->get_arena(); + Arena &arena = commit_entry.connection->get_arena(); std::string_view response; // Generate success response with actual assigned version diff --git a/src/http_handler.hpp b/src/http_handler.hpp index 40a24d6..f671799 100644 --- a/src/http_handler.hpp +++ b/src/http_handler.hpp @@ -9,7 +9,7 @@ #include #include "api_url_parser.hpp" -#include "arena_allocator.hpp" +#include "arena.hpp" #include "config.hpp" #include "connection.hpp" #include "connection_handler.hpp" @@ -28,7 +28,7 @@ struct RouteMatch; * Manages llhttp parser state and request data. 
*/ struct HttpConnectionState { - ArenaAllocator &arena; + Arena &arena; llhttp_t parser; llhttp_settings_t settings; @@ -56,13 +56,13 @@ struct HttpConnectionState { 0; // X-Request-Id header value (for tracing/logging) // Streaming parser for POST requests - ArenaAllocator::Ptr commit_parser; - ArenaAllocator::Ptr commit_request; + Arena::Ptr commit_parser; + Arena::Ptr commit_request; bool parsing_commit = false; bool basic_validation_passed = false; // Set to true if basic validation passes - explicit HttpConnectionState(ArenaAllocator &arena); + explicit HttpConnectionState(Arena &arena); }; /** @@ -165,7 +165,7 @@ private: // Arena for banned request IDs and related data structures (sequence thread // only) - ArenaAllocator banned_request_arena; + Arena banned_request_arena; using BannedRequestIdSet = std::unordered_set, std::equal_to, diff --git a/src/json_commit_request_parser.hpp b/src/json_commit_request_parser.hpp index 6277539..6d28e45 100644 --- a/src/json_commit_request_parser.hpp +++ b/src/json_commit_request_parser.hpp @@ -70,7 +70,7 @@ private: ArenaString operation_type; // Constructor to initialize arena-allocated containers - explicit ParserContext(ArenaAllocator *arena) + explicit ParserContext(Arena *arena) : current_key(ArenaStlAllocator(arena)), current_string(ArenaStlAllocator(arena)), current_number(ArenaStlAllocator(arena)), @@ -79,7 +79,7 @@ private: has_read_version_been_set = false; } - void attach_arena(ArenaAllocator *arena) { + void attach_arena(Arena *arena) { current_key = ArenaString{ArenaStlAllocator(arena)}; current_string = ArenaString{ArenaStlAllocator(arena)}; current_number = ArenaString{ArenaStlAllocator(arena)}; diff --git a/src/metric.cpp b/src/metric.cpp index 4103d56..09eef40 100644 --- a/src/metric.cpp +++ b/src/metric.cpp @@ -25,7 +25,7 @@ #include #include -#include "arena_allocator.hpp" +#include "arena.hpp" #include "format.hpp" // WeaselDB Metrics System Design: @@ -79,18 +79,18 @@ namespace metric { // - Content: Thread-specific metric instance state // // 3. 
TEMPORARY ARENAS: -// a) Caller-Provided Arenas (ArenaAllocator& parameters): +// a) Caller-Provided Arenas (Arena& parameters): // - Lifetime: Controlled by caller (function parameter) // - Purpose: Output formatting where caller controls result lifetime // - Owner: Caller owns arena and controls string lifetime -// - Example: render(ArenaAllocator& arena) - caller manages arena +// - Example: render(Arena& arena) - caller manages arena // lifecycle // // b) Stack-Owned Temporary Arenas: // - Lifetime: Function/scope lifetime (automatic destruction) // - Purpose: Internal temporary allocations for lookups and processing // - Owner: Function owns arena on stack, destroyed at scope exit -// - Example: intern_labels() creates ArenaAllocator lookup_arena(1024) +// - Example: intern_labels() creates Arena lookup_arena(1024) // // CRITICAL OWNERSHIP RULES: // @@ -124,8 +124,7 @@ static void validate_or_abort(bool condition, const char *message, } // Helper to copy a string into arena memory -static std::string_view arena_copy_string(std::string_view str, - ArenaAllocator &arena) { +static std::string_view arena_copy_string(std::string_view str, Arena &arena) { if (str.empty()) { return std::string_view{}; } @@ -142,7 +141,7 @@ struct LabelsKey { // Arena-owning constructor (copies strings into arena and formats as // Prometheus text) LabelsKey(std::span> l, - ArenaAllocator &arena) { + Arena &arena) { // Copy and validate all label keys and values, sort by key ArenaVector> labels(&arena); for (const auto &[key, value] : l) { @@ -251,7 +250,7 @@ template <> struct Family::State { ArenaStlAllocator>> instances; - explicit PerThreadState(ArenaAllocator &arena) + explicit PerThreadState(Arena &arena) : instances( ArenaStlAllocator>( &arena)) {} @@ -271,7 +270,7 @@ template <> struct Family::State { ArenaStlAllocator>>> callbacks; - State(ArenaAllocator &arena) + State(Arena &arena) : global_accumulated_values( ArenaStlAllocator>( &arena)), @@ -293,7 +292,7 @@ template <> struct Family::State { ArenaStlAllocator>>> callbacks; - State(ArenaAllocator &arena) + State(Arena &arena) : instances(ArenaStlAllocator>( &arena)), callbacks(ArenaStlAllocator< @@ -312,7 +311,7 @@ template <> struct Family::State { ArenaStlAllocator>> instances; - explicit PerThreadState(ArenaAllocator &arena) + explicit PerThreadState(Arena &arena) : instances( ArenaStlAllocator>( &arena)) {} @@ -326,7 +325,7 @@ template <> struct Family::State { ArenaStlAllocator>> global_accumulated_values; - State(ArenaAllocator &arena) + State(Arena &arena) : buckets(&arena), global_accumulated_values( ArenaStlAllocator>( @@ -371,54 +370,47 @@ struct Metric { static std::mutex mutex; // Global arena allocator for metric families and persistent global state - static ArenaAllocator &get_global_arena() { - static auto *global_arena = - new ArenaAllocator(64 * 1024); // 64KB initial size + static Arena &get_global_arena() { + static auto *global_arena = new Arena(64 * 1024); // 64KB initial size return *global_arena; } // Function-local statics to avoid static initialization order fiasco static auto &get_counter_families() { - using FamilyMap = - std::map::State>, - std::less, - ArenaStlAllocator< - std::pair::State>>>>; - static FamilyMap *counterFamilies = - new FamilyMap(ArenaStlAllocator< - std::pair::State>>>( + using FamilyMap = std::map< + std::string_view, Arena::Ptr::State>, + std::less, + ArenaStlAllocator::State>>>>; + static FamilyMap *counterFamilies = new FamilyMap( + ArenaStlAllocator::State>>>( &get_global_arena())); return 
*counterFamilies; } static auto &get_gauge_families() { - using FamilyMap = - std::map::State>, - std::less, - ArenaStlAllocator< - std::pair::State>>>>; + using FamilyMap = std::map< + std::string_view, Arena::Ptr::State>, + std::less, + ArenaStlAllocator::State>>>>; static FamilyMap *gaugeFamilies = new FamilyMap( ArenaStlAllocator::State>>>( + Arena::Ptr::State>>>( &get_global_arena())); return *gaugeFamilies; } static auto &get_histogram_families() { - using FamilyMap = - std::map::State>, - std::less, - ArenaStlAllocator< - std::pair::State>>>>; - static FamilyMap *histogramFamilies = - new FamilyMap(ArenaStlAllocator< - std::pair::State>>>( + using FamilyMap = std::map< + std::string_view, Arena::Ptr::State>, + std::less, + ArenaStlAllocator::State>>>>; + static FamilyMap *histogramFamilies = new FamilyMap( + ArenaStlAllocator::State>>>( &get_global_arena())); return *histogramFamilies; } @@ -446,8 +438,7 @@ struct Metric { // Registry of all thread arenas for memory tracking static auto &get_thread_arenas() { - using ThreadArenaMap = - std::unordered_map; + using ThreadArenaMap = std::unordered_map; static ThreadArenaMap *threadArenas = new ThreadArenaMap(); return *threadArenas; } @@ -460,7 +451,7 @@ struct Metric { // Thread cleanup for per-family thread-local storage struct ThreadInit { - ArenaAllocator arena; + Arena arena; ThreadInit() { // Register this thread's arena for memory tracking std::unique_lock _{mutex}; @@ -536,7 +527,7 @@ struct Metric { static thread_local ThreadInit thread_init; // Thread-local arena allocator for metric instances - static ArenaAllocator &get_thread_local_arena() { return thread_init.arena; } + static Arena &get_thread_local_arena() { return thread_init.arena; } // Thread cleanup now handled by ThreadInit RAII @@ -561,7 +552,7 @@ struct Metric { // lifetime) // Create temporary lookup key using stack-allocated arena - ArenaAllocator lookup_arena(1024); // Small arena for lookups only + Arena lookup_arena(1024); // Small arena for lookups only LabelsKey lookup_key{labels, lookup_arena}; // Use standard hash set lookup - lookup_key memory used transiently only @@ -736,7 +727,7 @@ struct Metric { ArenaVector thread_states; // Pre-resolved pointers Counter::State *global_state; // Pre-resolved global state pointer - CounterLabelData(const LabelsKey &key, ArenaAllocator &arena) + CounterLabelData(const LabelsKey &key, Arena &arena) : labels_key(key), thread_states(&arena), global_state(nullptr) {} }; @@ -754,7 +745,7 @@ struct Metric { Histogram::State *global_state; // Pre-resolved global state pointer size_t bucket_count; // Cache bucket count from family - HistogramLabelData(const LabelsKey &key, ArenaAllocator &arena) + HistogramLabelData(const LabelsKey &key, Arena &arena) : labels_key(key), thread_states(&arena), global_state(nullptr), bucket_count(0) {} }; @@ -764,7 +755,7 @@ struct Metric { ArenaVector> counter_data; ArenaVector> gauge_data; ArenaVector> histogram_data; - explicit LabelSets(ArenaAllocator &arena) + explicit LabelSets(Arena &arena) : counter_data(&arena), gauge_data(&arena), histogram_data(&arena) {} }; @@ -846,7 +837,7 @@ struct Metric { // Three-phase rendering system struct RenderPlan { - ArenaAllocator arena; + Arena arena; ArenaVector static_text{&arena}; ArenaVector instructions{&arena}; uint64_t registration_version; @@ -865,7 +856,7 @@ struct Metric { // Use temporary arena for formatting static text (will be interned to // global arena) - ArenaAllocator temp_arena(8192); // 8KB for temporary formatting + Arena 
temp_arena(8192); // 8KB for temporary formatting // Helper function to append an additional label to existing Prometheus // format @@ -1091,7 +1082,7 @@ struct Metric { // Phase 2: Execute phase - run instructions and generate dynamic text static ArenaVector - execute_render_plan(ArenaAllocator &arena, + execute_render_plan(Arena &arena, const ArenaVector &instructions) { ArenaVector dynamic_text(&arena); @@ -1191,7 +1182,7 @@ struct Metric { // Phase 3: Present phase - interleave static and dynamic text static ArenaVector - present_render_output(ArenaAllocator &arena, + present_render_output(Arena &arena, const ArenaVector &static_text, const ArenaVector &dynamic_text) { ArenaVector output(&arena); @@ -1213,7 +1204,7 @@ struct Metric { } // Build label sets once for reuse in both phases - static LabelSets build_label_sets(ArenaAllocator &arena) { + static LabelSets build_label_sets(Arena &arena) { LabelSets label_sets{arena}; // Build counter data with pre-resolved pointers @@ -1495,7 +1486,7 @@ Family create_gauge(std::string_view name, std::string_view help) { auto name_view = arena_copy_string(name, global_arena); auto &familyPtr = Metric::get_gauge_families()[name_view]; if (!familyPtr) { - // Family::State instances use ArenaAllocator::Ptr for automatic cleanup + // Family::State instances use Arena::Ptr for automatic cleanup familyPtr = global_arena.construct::State>(global_arena); familyPtr->name = name_view; familyPtr->help = arena_copy_string(help, global_arena); @@ -1519,7 +1510,7 @@ Family create_histogram(std::string_view name, std::string_view help, auto name_view = arena_copy_string(name, global_arena); auto &family_ptr = Metric::get_histogram_families()[name_view]; if (!family_ptr) { - // Family::State instances use ArenaAllocator::Ptr for automatic cleanup + // Family::State instances use Arena::Ptr for automatic cleanup family_ptr = global_arena.construct::State>(global_arena); family_ptr->name = name_view; family_ptr->help = arena_copy_string(help, global_arena); @@ -1688,7 +1679,7 @@ static double calculate_metrics_memory_usage() { } // New three-phase render implementation -std::span render(ArenaAllocator &arena) { +std::span render(Arena &arena) { // Initialize self-monitoring metrics (before taking global lock) static auto memory_gauge = []() { auto gauge = create_gauge("weaseldb_metrics_memory_bytes", diff --git a/src/metric.hpp b/src/metric.hpp index c1087fb..dbd2ea5 100644 --- a/src/metric.hpp +++ b/src/metric.hpp @@ -50,7 +50,7 @@ #include #include -#include "arena_allocator.hpp" +#include "arena.hpp" namespace metric { @@ -220,7 +220,7 @@ std::vector exponential_buckets(double start, double factor, int count); // allocated in provided arena for zero-copy efficiency. The caller is // responsible for the arena's lifecycle. 
THREAD SAFETY: Serialized by global // mutex - callbacks need not be thread-safe -std::span render(ArenaAllocator &arena); +std::span render(Arena &arena); // Validation functions for Prometheus compatibility bool is_valid_metric_name(std::string_view name); diff --git a/style.md b/style.md index 1ad375f..7bbdbcb 100644 --- a/style.md +++ b/style.md @@ -114,7 +114,7 @@ std::string_view response = static_format(arena, "\r\n", body); // Printf-style formatting - runtime flexible -ArenaAllocator& arena = conn.get_arena(); +Arena& arena = conn.get_arena(); std::string_view response = format(arena, "HTTP/1.1 %d OK\r\n" "Content-Length: %zu\r\n" @@ -154,9 +154,9 @@ int32_t initial_block_size_; - **Full encapsulation still applies** - use `private:` sections to hide implementation details and maintain deep, capable structs - The struct keyword doesn't mean shallow design - it means interface-first organization for human readers ```cpp -struct ArenaAllocator { +struct Arena { // Public interface first - explicit ArenaAllocator(int64_t initial_size = 1024); + explicit Arena(int64_t initial_size = 1024); void* allocate_raw(int64_t size); private: @@ -228,7 +228,7 @@ template struct rebind { using type = T*; }; #include #include -#include "arena_allocator.hpp" +#include "arena.hpp" #include "commit_request.hpp" // Never this: @@ -248,16 +248,16 @@ std::unique_ptr parser; - **Explicit constructors** to prevent implicit conversions - **Delete copy operations** when inappropriate ```cpp -struct ArenaAllocator { - explicit ArenaAllocator(int64_t initial_size = 1024); +struct Arena { + explicit Arena(int64_t initial_size = 1024); // Copy construction is not allowed - ArenaAllocator(const ArenaAllocator &source) = delete; - ArenaAllocator &operator=(const ArenaAllocator &source) = delete; + Arena(const Arena &source) = delete; + Arena &operator=(const Arena &source) = delete; // Move semantics - ArenaAllocator(ArenaAllocator &&source) noexcept; - ArenaAllocator &operator=(ArenaAllocator &&source) noexcept; + Arena(Arena &&source) noexcept; + Arena &operator=(Arena &&source) noexcept; private: int32_t initial_block_size_; @@ -276,7 +276,7 @@ private: std::span operations() const { return operations_; } void process_data(std::string_view request_data); // ≤ 16 bytes, pass by value void process_request(const CommitRequest& commit_request); // > 16 bytes, pass by reference -ArenaAllocator(ArenaAllocator &&source) noexcept; +Arena(Arena &&source) noexcept; ``` ### Template Usage @@ -353,10 +353,10 @@ auto value = counter; // Implicit - memory ordering not explicit - **STL containers with arena allocators require default construction after arena reset** - `clear()` is not sufficient ```cpp // STL containers with arena allocators - correct reset pattern -std::vector> operations(arena_allocator); +std::vector> operations(arena); // ... use container ... 
operations = {}; // Default construct - clear() won't work correctly -arena_allocator.reset(); // Reset arena memory +arena.reset(); // Reset arena memory ``` ### Resource Management @@ -364,7 +364,7 @@ arena_allocator.reset(); // Reset arena memory - **Move semantics** for efficient resource transfer - **Explicit cleanup** methods where appropriate ```cpp -~ArenaAllocator() { +~Arena() { while (current_block_) { Block *prev = current_block_->prev; std::free(current_block_); @@ -395,7 +395,7 @@ enum class [[nodiscard]] ParseResult { Success, InvalidJson, MissingField }; // System failure - abort immediately void* memory = std::malloc(size); if (!memory) { - std::fprintf(stderr, "ArenaAllocator: Memory allocation failed\n"); + std::fprintf(stderr, "Arena: Memory allocation failed\n"); std::abort(); } // ... use memory, eventually std::free(memory) @@ -529,8 +529,8 @@ Connection(struct sockaddr_storage addr, int fd, int64_t id, - **SUBCASE** for related test variations - **Fresh instances** for each test to avoid state contamination ```cpp -TEST_CASE("ArenaAllocator basic allocation") { - ArenaAllocator arena; +TEST_CASE("Arena basic allocation") { + Arena arena; SUBCASE("allocate zero bytes returns nullptr") { void *ptr = arena.allocate_raw(0); diff --git a/tests/test_arena_allocator.cpp b/tests/test_arena.cpp similarity index 87% rename from tests/test_arena_allocator.cpp rename to tests/test_arena.cpp index f630ab9..6cc6d9e 100644 --- a/tests/test_arena_allocator.cpp +++ b/tests/test_arena.cpp @@ -1,27 +1,27 @@ -#include "arena_allocator.hpp" +#include "arena.hpp" #include "format.hpp" #include #include #include #include -TEST_CASE("ArenaAllocator basic construction") { - ArenaAllocator arena; +TEST_CASE("Arena basic construction") { + Arena arena; CHECK(arena.num_blocks() == 0); CHECK(arena.used_bytes() == 0); CHECK(arena.total_allocated() == 0); CHECK(arena.available_in_current_block() == 0); } -TEST_CASE("ArenaAllocator custom initial size") { - ArenaAllocator arena(2048); +TEST_CASE("Arena custom initial size") { + Arena arena(2048); CHECK(arena.num_blocks() == 0); CHECK(arena.total_allocated() == 0); CHECK(arena.available_in_current_block() == 0); } -TEST_CASE("ArenaAllocator basic allocation") { - ArenaAllocator arena; +TEST_CASE("Arena basic allocation") { + Arena arena; SUBCASE("allocate zero bytes returns nullptr") { void *ptr = arena.allocate_raw(0); @@ -46,8 +46,8 @@ TEST_CASE("ArenaAllocator basic allocation") { } } -TEST_CASE("ArenaAllocator alignment") { - ArenaAllocator arena; +TEST_CASE("Arena alignment") { + Arena arena; SUBCASE("default alignment") { void *ptr = arena.allocate_raw(1); @@ -66,14 +66,14 @@ TEST_CASE("ArenaAllocator alignment") { } SUBCASE("alignment with larger allocations") { - ArenaAllocator fresh_arena; + Arena fresh_arena; void *ptr = fresh_arena.allocate_raw(100, 64); CHECK(reinterpret_cast(ptr) % 64 == 0); } } -TEST_CASE("ArenaAllocator block management") { - ArenaAllocator arena(128); +TEST_CASE("Arena block management") { + Arena arena(128); SUBCASE("single block allocation") { void *ptr = arena.allocate_raw(64); @@ -98,8 +98,8 @@ TEST_CASE("ArenaAllocator block management") { } } -TEST_CASE("ArenaAllocator construct template") { - ArenaAllocator arena; +TEST_CASE("Arena construct template") { + Arena arena; SUBCASE("construct int") { int *ptr = arena.construct(42); @@ -142,8 +142,8 @@ TEST_CASE("ArenaAllocator construct template") { } } -TEST_CASE("ArenaAllocator reset functionality") { - ArenaAllocator arena; +TEST_CASE("Arena reset 
functionality") { + Arena arena; arena.allocate_raw(100); arena.allocate_raw(200); @@ -159,8 +159,8 @@ TEST_CASE("ArenaAllocator reset functionality") { CHECK(arena.used_bytes() == 50); } -TEST_CASE("ArenaAllocator reset memory leak test") { - ArenaAllocator arena(32); // Smaller initial size +TEST_CASE("Arena reset memory leak test") { + Arena arena(32); // Smaller initial size // Force multiple blocks arena.allocate_raw(30); // First block (32 bytes) @@ -191,8 +191,8 @@ TEST_CASE("ArenaAllocator reset memory leak test") { CHECK(arena.used_bytes() == 20); } -TEST_CASE("ArenaAllocator memory tracking") { - ArenaAllocator arena(512); +TEST_CASE("Arena memory tracking") { + Arena arena(512); CHECK(arena.total_allocated() == 0); CHECK(arena.used_bytes() == 0); @@ -210,8 +210,8 @@ TEST_CASE("ArenaAllocator memory tracking") { CHECK(arena.total_allocated() >= 1024); } -TEST_CASE("ArenaAllocator stress test") { - ArenaAllocator arena(1024); +TEST_CASE("Arena stress test") { + Arena arena(1024); SUBCASE("many small allocations") { std::vector ptrs; @@ -237,13 +237,13 @@ TEST_CASE("ArenaAllocator stress test") { } } -TEST_CASE("ArenaAllocator move semantics") { - ArenaAllocator arena1(512); +TEST_CASE("Arena move semantics") { + Arena arena1(512); arena1.allocate_raw(100); size_t used_bytes = arena1.used_bytes(); size_t num_blocks = arena1.num_blocks(); - ArenaAllocator arena2 = std::move(arena1); + Arena arena2 = std::move(arena1); CHECK(arena2.used_bytes() == used_bytes); CHECK(arena2.num_blocks() == num_blocks); @@ -251,16 +251,16 @@ TEST_CASE("ArenaAllocator move semantics") { CHECK(ptr != nullptr); } -TEST_CASE("ArenaAllocator edge cases") { +TEST_CASE("Arena edge cases") { SUBCASE("very small block size") { - ArenaAllocator arena(16); + Arena arena(16); void *ptr = arena.allocate_raw(8); CHECK(ptr != nullptr); CHECK(arena.num_blocks() == 1); } SUBCASE("allocation exactly block size") { - ArenaAllocator arena(64); + Arena arena(64); void *ptr = arena.allocate_raw(64); CHECK(ptr != nullptr); CHECK(arena.num_blocks() == 1); @@ -271,7 +271,7 @@ TEST_CASE("ArenaAllocator edge cases") { } SUBCASE("multiple resets") { - ArenaAllocator arena; + Arena arena; for (int i = 0; i < 10; ++i) { arena.allocate_raw(100); arena.reset(); @@ -290,8 +290,8 @@ struct TestPOD { } }; -TEST_CASE("ArenaAllocator with custom objects") { - ArenaAllocator arena; +TEST_CASE("Arena with custom objects") { + Arena arena; TestPOD *obj1 = arena.construct(42, "first"); TestPOD *obj2 = arena.construct(84, "second"); @@ -305,8 +305,8 @@ TEST_CASE("ArenaAllocator with custom objects") { CHECK(std::strcmp(obj2->name, "second") == 0); } -TEST_CASE("ArenaAllocator geometric growth policy") { - ArenaAllocator arena(64); +TEST_CASE("Arena geometric growth policy") { + Arena arena(64); SUBCASE("normal geometric growth doubles size") { arena.allocate_raw(60); // Fill first block @@ -338,8 +338,8 @@ TEST_CASE("ArenaAllocator geometric growth policy") { } } -TEST_CASE("ArenaAllocator alignment edge cases") { - ArenaAllocator arena; +TEST_CASE("Arena alignment edge cases") { + Arena arena; SUBCASE("unaligned then aligned allocation") { void *ptr1 = arena.allocate_raw(1, 1); @@ -351,20 +351,20 @@ TEST_CASE("ArenaAllocator alignment edge cases") { } SUBCASE("large alignment requirements") { - ArenaAllocator fresh_arena; + Arena fresh_arena; void *ptr = fresh_arena.allocate_raw(1, 128); CHECK(ptr != nullptr); CHECK(reinterpret_cast(ptr) % 128 == 0); } } -TEST_CASE("ArenaAllocator realloc functionality") { - ArenaAllocator arena; 
+TEST_CASE("Arena realloc functionality") { + Arena arena; SUBCASE("realloc edge cases") { // realloc with new_size == 0 returns nullptr and reclaims memory if it's // the last allocation - ArenaAllocator fresh_arena(256); + Arena fresh_arena(256); void *ptr = fresh_arena.allocate_raw(100); size_t used_before = fresh_arena.used_bytes(); CHECK(used_before == 100); @@ -374,7 +374,7 @@ TEST_CASE("ArenaAllocator realloc functionality") { CHECK(fresh_arena.used_bytes() == 0); // Memory should be reclaimed // Test case where it's NOT the last allocation - memory cannot be reclaimed - ArenaAllocator arena2(256); + Arena arena2(256); void *ptr1 = arena2.allocate_raw(50); (void)arena2.allocate_raw(50); size_t used_before2 = arena2.used_bytes(); @@ -397,7 +397,7 @@ TEST_CASE("ArenaAllocator realloc functionality") { } SUBCASE("in-place extension - growing") { - ArenaAllocator fresh_arena(1024); + Arena fresh_arena(1024); void *ptr = fresh_arena.allocate_raw(100); CHECK(ptr != nullptr); @@ -418,7 +418,7 @@ TEST_CASE("ArenaAllocator realloc functionality") { } SUBCASE("in-place shrinking") { - ArenaAllocator fresh_arena(1024); + Arena fresh_arena(1024); void *ptr = fresh_arena.allocate_raw(200); std::memset(ptr, 0xCD, 200); @@ -435,7 +435,7 @@ TEST_CASE("ArenaAllocator realloc functionality") { } SUBCASE("copy when can't extend in place") { - ArenaAllocator fresh_arena(256); // Larger block to avoid edge cases + Arena fresh_arena(256); // Larger block to avoid edge cases // Allocate first chunk void *ptr1 = fresh_arena.allocate_raw(60); @@ -470,7 +470,7 @@ TEST_CASE("ArenaAllocator realloc functionality") { } SUBCASE("copy when insufficient space for extension") { - ArenaAllocator fresh_arena(100); + Arena fresh_arena(100); // Allocate almost all space void *ptr = fresh_arena.allocate_raw(90); @@ -489,7 +489,7 @@ TEST_CASE("ArenaAllocator realloc functionality") { } SUBCASE("realloc with custom alignment") { - ArenaAllocator fresh_arena(1024); + Arena fresh_arena(1024); // Allocate with specific alignment void *ptr = fresh_arena.allocate_raw(50, 16); @@ -509,7 +509,7 @@ TEST_CASE("ArenaAllocator realloc functionality") { } SUBCASE("realloc stress test") { - ArenaAllocator fresh_arena(512); + Arena fresh_arena(512); void *ptr = fresh_arena.allocate_raw(50); size_t current_size = 50; @@ -536,7 +536,7 @@ TEST_CASE("ArenaAllocator realloc functionality") { TEST_CASE("format function fallback codepath") { SUBCASE("single-pass optimization success") { - ArenaAllocator arena(128); + Arena arena(128); auto result = format(arena, "Hello %s! Number: %d", "World", 42); CHECK(result == "Hello World! 
Number: 42"); CHECK(result.length() == 23); @@ -544,7 +544,7 @@ TEST_CASE("format function fallback codepath") { SUBCASE("fallback when speculative formatting fails") { // Create arena with limited space to force fallback - ArenaAllocator arena(16); + Arena arena(16); // Consume most space to leave insufficient room for speculative formatting arena.allocate(10); @@ -561,7 +561,7 @@ TEST_CASE("format function fallback codepath") { } SUBCASE("edge case - exactly available space") { - ArenaAllocator arena(32); + Arena arena(32); arena.allocate(20); // Leave 12 bytes CHECK(arena.available_in_current_block() == 12); @@ -574,13 +574,13 @@ TEST_CASE("format function fallback codepath") { SUBCASE("allocate_remaining_space postcondition") { // Test empty arena - ArenaAllocator empty_arena(64); + Arena empty_arena(64); auto space1 = empty_arena.allocate_remaining_space(); CHECK(space1.allocated_bytes >= 1); CHECK(space1.allocated_bytes == 64); // Test full arena (should create new block) - ArenaAllocator full_arena(32); + Arena full_arena(32); full_arena.allocate(32); // Fill completely auto space2 = full_arena.allocate_remaining_space(); CHECK(space2.allocated_bytes >= 1); @@ -588,7 +588,7 @@ TEST_CASE("format function fallback codepath") { } SUBCASE("format error handling") { - ArenaAllocator arena(64); + Arena arena(64); // Test with invalid format (should return empty string_view) // Note: This is hard to trigger reliably across platforms, @@ -598,7 +598,7 @@ TEST_CASE("format function fallback codepath") { } } -// Test object with non-trivial destructor for ArenaAllocator::Ptr testing +// Test object with non-trivial destructor for Arena::Ptr testing class TestObject { public: static int destructor_count; @@ -625,11 +625,11 @@ struct TrivialObject { TrivialObject(int v) : value(v) {} }; -TEST_CASE("ArenaAllocator::Ptr smart pointer functionality") { +TEST_CASE("Arena::Ptr smart pointer functionality") { TestObject::reset_counters(); SUBCASE("construct returns raw pointer for trivially destructible types") { - ArenaAllocator arena; + Arena arena; auto ptr = arena.construct(42); static_assert(std::is_same_v, @@ -639,23 +639,22 @@ TEST_CASE("ArenaAllocator::Ptr smart pointer functionality") { CHECK(ptr->value == 42); } - SUBCASE("construct returns ArenaAllocator::Ptr for non-trivially " + SUBCASE("construct returns Arena::Ptr for non-trivially " "destructible types") { - ArenaAllocator arena; + Arena arena; auto ptr = arena.construct(42); - static_assert( - std::is_same_v>, - "construct() should return ArenaAllocator::Ptr for non-trivially " - "destructible types"); + static_assert(std::is_same_v>, + "construct() should return Arena::Ptr for non-trivially " + "destructible types"); CHECK(ptr); CHECK(ptr->value == 42); CHECK(TestObject::constructor_count == 1); CHECK(TestObject::destructor_count == 0); } - SUBCASE("ArenaAllocator::Ptr calls destructor on destruction") { - ArenaAllocator arena; + SUBCASE("Arena::Ptr calls destructor on destruction") { + Arena arena; { auto ptr = arena.construct(42); @@ -666,8 +665,8 @@ TEST_CASE("ArenaAllocator::Ptr smart pointer functionality") { CHECK(TestObject::destructor_count == 1); } - SUBCASE("ArenaAllocator::Ptr move semantics") { - ArenaAllocator arena; + SUBCASE("Arena::Ptr move semantics") { + Arena arena; auto ptr1 = arena.construct(42); CHECK(TestObject::constructor_count == 1); @@ -682,8 +681,8 @@ TEST_CASE("ArenaAllocator::Ptr smart pointer functionality") { CHECK(TestObject::destructor_count == 1); // Destructor called } - 
SUBCASE("ArenaAllocator::Ptr access operators") { - ArenaAllocator arena; + SUBCASE("Arena::Ptr access operators") { + Arena arena; auto ptr = arena.construct(123); @@ -703,8 +702,8 @@ TEST_CASE("ArenaAllocator::Ptr smart pointer functionality") { CHECK(static_cast(ptr) == true); } - SUBCASE("ArenaAllocator::Ptr reset functionality") { - ArenaAllocator arena; + SUBCASE("Arena::Ptr reset functionality") { + Arena arena; auto ptr = arena.construct(42); CHECK(TestObject::constructor_count == 1); @@ -723,8 +722,8 @@ TEST_CASE("ArenaAllocator::Ptr smart pointer functionality") { CHECK(TestObject::destructor_count == 1); } - SUBCASE("ArenaAllocator::Ptr release functionality") { - ArenaAllocator arena; + SUBCASE("Arena::Ptr release functionality") { + Arena arena; auto ptr = arena.construct(42); TestObject *raw_ptr = ptr.release(); @@ -739,8 +738,8 @@ TEST_CASE("ArenaAllocator::Ptr smart pointer functionality") { CHECK(TestObject::destructor_count == 1); } - SUBCASE("ArenaAllocator::Ptr move assignment") { - ArenaAllocator arena; + SUBCASE("Arena::Ptr move assignment") { + Arena arena; auto ptr1 = arena.construct(42); auto ptr2 = arena.construct(84); diff --git a/tests/test_http_handler.cpp b/tests/test_http_handler.cpp index b80dae4..f9499bf 100644 --- a/tests/test_http_handler.cpp +++ b/tests/test_http_handler.cpp @@ -1,4 +1,4 @@ -#include "arena_allocator.hpp" +#include "arena.hpp" #include "http_handler.hpp" #include "perfetto_categories.hpp" #include @@ -12,13 +12,13 @@ std::atomic activeConnections{0}; // Simple test helper since Connection has complex constructor requirements struct TestConnectionData { - ArenaAllocator arena; + Arena arena; std::string message_buffer; void *user_data = nullptr; void append_message(std::string_view data) { message_buffer += data; } - ArenaAllocator &get_arena() { return arena; } + Arena &get_arena() { return arena; } const std::string &getResponse() const { return message_buffer; } void clearResponse() { message_buffer.clear(); } void reset() { diff --git a/tests/test_metric.cpp b/tests/test_metric.cpp index 18a7b98..88fb288 100644 --- a/tests/test_metric.cpp +++ b/tests/test_metric.cpp @@ -1,6 +1,6 @@ #include -#include "arena_allocator.hpp" +#include "arena.hpp" #include "metric.hpp" #include @@ -279,7 +279,7 @@ TEST_CASE("callback-based metrics") { []() { return 42.0; }); // Callback should be called during render - ArenaAllocator arena; + Arena arena; auto output = metric::render(arena); CHECK(output.size() > 0); } @@ -288,7 +288,7 @@ TEST_CASE("callback-based metrics") { gauge_family.register_callback({{"type", "callback"}}, []() { return 123.5; }); - ArenaAllocator arena; + Arena arena; auto output = metric::render(arena); CHECK(output.size() > 0); } @@ -304,7 +304,7 @@ TEST_CASE("callback-based metrics") { } TEST_CASE("prometheus text format rendering") { - ArenaAllocator arena; + Arena arena; // Create some metrics auto counter_family = @@ -463,7 +463,7 @@ TEST_CASE("thread safety") { threads.emplace_back([&]() { start_latch.arrive_and_wait(); - ArenaAllocator arena; + Arena arena; auto output = metric::render(arena); if (output.size() > 0) { success_count.fetch_add(1); @@ -503,7 +503,7 @@ TEST_CASE("thread counter cleanup bug") { // Measure actual values from within the thread (before ThreadInit // destructor runs) - ArenaAllocator thread_arena; + Arena thread_arena; auto thread_output = metric::render(thread_arena); for (const auto &line : thread_output) { @@ -538,7 +538,7 @@ TEST_CASE("thread counter cleanup bug") { worker.join(); // Measure 
values after thread cleanup - ArenaAllocator arena; + Arena arena; auto output = metric::render(arena); double counter_value_after = 0; @@ -615,7 +615,7 @@ TEST_CASE("error conditions") { TEST_CASE("memory management") { SUBCASE("arena allocation in render") { - ArenaAllocator arena; + Arena arena; auto initial_used = arena.used_bytes(); auto counter_family = metric::create_counter("memory_test", "Memory test"); @@ -636,7 +636,7 @@ TEST_CASE("memory management") { } SUBCASE("arena reset behavior") { - ArenaAllocator arena; + Arena arena; auto counter_family = metric::create_counter("reset_test", "Reset test"); auto counter = counter_family.create({}); @@ -659,7 +659,7 @@ TEST_CASE("render output deterministic order golden test") { // Clean slate - reset all metrics before this test metric::reset_metrics_for_testing(); - ArenaAllocator arena; + Arena arena; // Create a comprehensive set of metrics with deliberate ordering // to test deterministic output diff --git a/tools/debug_arena.cpp b/tools/debug_arena.cpp index 8f572a8..a4fe5d9 100644 --- a/tools/debug_arena.cpp +++ b/tools/debug_arena.cpp @@ -10,7 +10,7 @@ struct ArenaDebugger { const CommitRequest &commit_request; - const ArenaAllocator &arena; + const Arena &arena; std::unordered_set referenced_addresses; explicit ArenaDebugger(const CommitRequest &cr)