Implement spend_cpu_cycles in assembly
The compiler was unrolling the loop, so spend_cpu_cycles is now implemented in assembly to keep the amount of work per iteration consistent across compilers and optimization levels.
@@ -5,7 +5,7 @@
 #include "../src/cpu_work.hpp"
 
 int main(int argc, char *argv[]) {
-  int iterations = DEFAULT_HEALTH_CHECK_ITERATIONS; // Default: 7000
+  int iterations = DEFAULT_HEALTH_CHECK_ITERATIONS;
 
   if (argc > 1) {
     try {

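For reference, a minimal standalone harness around this entry point could look like the sketch below. Only `spend_cpu_cycles` and `DEFAULT_HEALTH_CHECK_ITERATIONS` come from `cpu_work.hpp`; the argument handling and `std::chrono` timing are illustrative and not the project's actual `bench_cpu_work` tool (the performance notes say timings are validated with nanobench).

```cpp
// Hypothetical sketch: time spend_cpu_cycles() for a given iteration count.
// Only spend_cpu_cycles and DEFAULT_HEALTH_CHECK_ITERATIONS are from the repo;
// the rest is illustrative.
#include <chrono>
#include <cstdio>
#include <cstdlib>

#include "../src/cpu_work.hpp"

int main(int argc, char *argv[]) {
  int iterations = DEFAULT_HEALTH_CHECK_ITERATIONS;
  if (argc > 1) {
    iterations = std::atoi(argv[1]);  // e.g. ./bench_cpu_work 4000
  }

  constexpr int kRuns = 100000;
  auto start = std::chrono::steady_clock::now();
  for (int run = 0; run < kRuns; ++run) {
    spend_cpu_cycles(iterations);
  }
  auto stop = std::chrono::steady_clock::now();

  auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start);
  std::printf("%d iterations: %.1f ns per call\n", iterations,
              static_cast<double>(ns.count()) / kRuns);
  return 0;
}
```
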
@@ -51,7 +51,7 @@ Controls benchmarking and health check behavior.
 
 | Parameter | Type | Default | Description |
 |-----------|------|---------|-------------|
-| `ok_resolve_iterations` | integer | `7000` | CPU-intensive loop iterations for `/ok` requests in resolve stage. 0 = health check only, 7000 = default benchmark load (~650ns, 1M req/s) |
+| `ok_resolve_iterations` | integer | `4000` | CPU-intensive loop iterations for `/ok` requests in resolve stage. 0 = health check only, 4000 = default benchmark load (~740ns, 1M req/s) |
 
 ## Example Configuration
 

@@ -29,6 +29,6 @@ keepalive_interval_seconds = 30
 [benchmark]
 # CPU-intensive loop iterations for /ok requests in resolve stage
 # 0 = health check only (no CPU work)
-# 7000 = default benchmark load (650ns CPU work, 1M req/s)
+# 4000 = default benchmark load (740ns CPU work, 1M req/s)
 # Higher values = more CPU stress testing
 ok_resolve_iterations = 0

@@ -79,8 +79,8 @@ struct SubscriptionConfig {
 */
 struct BenchmarkConfig {
   /// CPU-intensive loop iterations for /ok requests in resolve stage
-  /// 0 = health check only, 7000 = default benchmark load (650ns, 1M req/s)
-  int ok_resolve_iterations = 7000;
+  /// 0 = health check only, 4000 = default benchmark load (740ns, 1M req/s)
+  int ok_resolve_iterations = 0;
 };
 
 /**

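A plausible reading of how the resolve stage consumes this setting is sketched below; `BenchmarkConfig` and `spend_cpu_cycles` come from this commit, while the surrounding request/handler types are hypothetical stand-ins.

```cpp
// Hypothetical wiring, not taken from the repo: branch on ok_resolve_iterations
// in the resolve stage so that 0 stays a pure health check and a positive value
// (e.g. 4000, roughly 740ns) adds a fixed amount of CPU work per /ok request.
#include "cpu_work.hpp"

struct BenchmarkConfig {
  /// 0 = health check only, 4000 = default benchmark load (740ns, 1M req/s)
  int ok_resolve_iterations = 0;
};

struct OkRequest {};  // placeholder for a queued /ok request

void resolve_ok_request(const BenchmarkConfig &cfg, OkRequest & /*req*/) {
  if (cfg.ok_resolve_iterations > 0) {
    spend_cpu_cycles(cfg.ok_resolve_iterations);
  }
  // ...hand the request on to the persist stage, which sends the "OK" response...
}
```
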
@@ -1,10 +1,61 @@
 #include "cpu_work.hpp"
 
-void spend_cpu_cycles(int iterations) {
-  // Perform CPU-intensive work that cannot be optimized away
-  // Use inline assembly to prevent compiler optimization
-  for (int i = 0; i < iterations; ++i) {
-    // Simple loop with inline assembly barrier to prevent optimization
-    asm volatile("");
-  }
-}
+#if defined(__x86_64__) || defined(__amd64__)
+// x86-64 file-scoped assembly implementation
+#ifdef __APPLE__
+asm(".text\n"
+    ".globl _spend_cpu_cycles\n"
+    "_spend_cpu_cycles:\n"
+    " test %edi, %edi\n"  // Test if iterations <= 0
+    " jle .L_end\n"       // Jump to end if <= 0
+    ".L_loop:\n"          // Loop start
+    " dec %edi\n"         // Decrement iterations
+    " jnz .L_loop\n"      // Jump back if not zero
+    ".L_end:\n"           // End
+    " ret\n"              // Return
+);
+#else
+asm(".text\n"
+    ".globl spend_cpu_cycles\n"
+    ".type spend_cpu_cycles, @function\n"
+    "spend_cpu_cycles:\n"
+    " test %edi, %edi\n"  // Test if iterations <= 0
+    " jle .L_end\n"       // Jump to end if <= 0
+    ".L_loop:\n"          // Loop start
+    " dec %edi\n"         // Decrement iterations
+    " jnz .L_loop\n"      // Jump back if not zero
+    ".L_end:\n"           // End
+    " ret\n"              // Return
+    ".size spend_cpu_cycles, .-spend_cpu_cycles\n");
+#endif
+
+#elif defined(__aarch64__)
+// ARM64 file-scoped assembly implementation
+#ifdef __APPLE__
+asm(".text\n"
+    ".globl _spend_cpu_cycles\n"
+    "_spend_cpu_cycles:\n"
+    " cmp w0, wzr\n"      // Compare iterations with zero
+    " b.le .L_end\n"      // Branch to end if <= 0
+    ".L_loop:\n"          // Loop start
+    " subs w0, w0, #1\n"  // Decrement iterations and set flags
+    " b.ne .L_loop\n"     // Branch back if not zero
+    ".L_end:\n"           // End
+    " ret\n"              // Return
+);
+#else
+asm(".text\n"
+    ".globl spend_cpu_cycles\n"
+    ".type spend_cpu_cycles, %function\n"
+    "spend_cpu_cycles:\n"
+    " cmp w0, wzr\n"      // Compare iterations with zero
+    " b.le .L_end\n"      // Branch to end if <= 0
+    ".L_loop:\n"          // Loop start
+    " subs w0, w0, #1\n"  // Decrement iterations and set flags
+    " b.ne .L_loop\n"     // Branch back if not zero
+    ".L_end:\n"           // End
+    " ret\n"              // Return
+    ".size spend_cpu_cycles, .-spend_cpu_cycles\n");
+#endif
+
+#endif

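Note that architectures outside the `__x86_64__`/`__aarch64__` checks now get no definition at all. If a portable fallback were ever wanted, a sketch like the one below (not part of this commit) keeps the loop from being deleted by routing the counter through an inline-asm operand; an empty `asm volatile("")` barrier, as in the removed C++ version, also prevents deletion but still leaves the compiler free to unroll, which is what motivated moving the hot path into real assembly.

```cpp
// Hypothetical portable fallback, NOT part of this commit.
#if !defined(__x86_64__) && !defined(__amd64__) && !defined(__aarch64__)
extern "C" void spend_cpu_cycles(int iterations) {
  for (int i = 0; i < iterations; ++i) {
    // "+r"(i) is an opaque read-modify-write of the counter: the loop cannot
    // be removed, but the exact instruction count per iteration is still up
    // to the compiler, unlike the hand-written assembly above.
    asm volatile("" : "+r"(i));
  }
}
#endif
```
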
@@ -1,5 +1,7 @@
 #pragma once
 
+extern "C" {
+
 /**
  * @brief Perform CPU-intensive work for benchmarking and health check purposes.
  *

@@ -10,6 +12,7 @@
  * @param iterations Number of loop iterations to perform
  */
 void spend_cpu_cycles(int iterations);
+}
 
 /**
  * @brief Default CPU work iterations for health check benchmarking.

@@ -18,4 +21,4 @@ void spend_cpu_cycles(int iterations);
  * /ok health check resolve stage. This value provides 650ns of CPU work
  * and achieves 1M requests/second throughput through the 4-stage pipeline.
  */
-constexpr int DEFAULT_HEALTH_CHECK_ITERATIONS = 7000;
+constexpr int DEFAULT_HEALTH_CHECK_ITERATIONS = 4000;

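The `extern "C"` block is what ties the header to the file-scoped assembly: the asm defines the plain symbol `spend_cpu_cycles` (with a leading underscore on macOS), so the declaration must use C linkage to resolve to it. A small illustration; the mangled name and the caller below are examples only, not code from the repo:

```cpp
// With C linkage the call below binds to the symbol "spend_cpu_cycles"
// defined by the file-scoped asm. Without extern "C" the declaration would
// mangle (to something like "_Z16spend_cpu_cyclesi" under the Itanium ABI)
// and the program would fail to link against the asm definition.
extern "C" void spend_cpu_cycles(int iterations);

void run_default_load() {
  spend_cpu_cycles(4000);  // ~740ns of CPU work per the benchmark notes
}
```
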
@@ -79,6 +79,11 @@ void HttpHandler::on_connection_closed(Connection &conn) {
 void HttpHandler::on_write_buffer_drained(
     std::unique_ptr<Connection> &conn_ptr) {
   // Reset arena after all messages have been written for the next request
+  auto *state = static_cast<HttpConnectionState *>(conn_ptr->user_data);
+  if (state) {
+    TRACE_EVENT("http", "reply",
+                perfetto::Flow::Global(state->http_request_id));
+  }
   on_connection_closed(*conn_ptr);
   conn_ptr->reset();
   on_connection_established(*conn_ptr);

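The new trace point uses a Perfetto flow so the reply can be visually linked to the earlier event that carried the same `http_request_id`. A minimal, self-contained illustration of that pairing (tracing-session setup omitted; the function names are hypothetical, only the `TRACE_EVENT`/`perfetto::Flow::Global` usage mirrors the diff):

```cpp
#include <perfetto.h>

PERFETTO_DEFINE_CATEGORIES(
    perfetto::Category("http").SetDescription("HTTP request handling"));
PERFETTO_TRACK_EVENT_STATIC_STORAGE();

// Hypothetical emitting side: tag the point where the request enters the
// pipeline with its id.
void on_request_enqueued(uint64_t http_request_id) {
  TRACE_EVENT("http", "request", perfetto::Flow::Global(http_request_id));
}

// Hypothetical reply side: reusing the same Flow::Global id lets the Perfetto
// UI draw an arrow from "request" to "reply", even across threads.
void on_reply_written(uint64_t http_request_id) {
  TRACE_EVENT("http", "reply", perfetto::Flow::Global(http_request_id));
}
```
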
@@ -88,7 +88,7 @@ struct HttpHandler : ConnectionHandler {
     resolveThread = std::thread{[this]() {
       pthread_setname_np(pthread_self(), "txn-resolve");
       for (;;) {
-        auto guard = commitPipeline.acquire<1, 0>();
+        auto guard = commitPipeline.acquire<1, 0>(/*maxBatch*/ 1);
         if (process_resolve_batch(guard.batch)) {
           return; // Shutdown signal received
         }

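`commitPipeline.acquire` is project-specific API, so the sketch below does not reproduce it; it only illustrates, with a generic mutex/condvar queue, why capping the batch at 1 keeps the resolve thread on the fast path: the consumer returns to the coordination check after every item instead of draining a large batch and then paying for an expensive spin/wait (see the "Resolve Batch Size Optimization" notes later in this commit). All names in the sketch are hypothetical.

```cpp
// Hypothetical illustration (not the project's commitPipeline API): a consumer
// that takes at most `max_batch` items per wakeup. With max_batch == 1 the
// thread re-checks for new work after every item, mirroring the effect of
// acquire<1, 0>(/*maxBatch*/ 1).
#include <condition_variable>
#include <deque>
#include <mutex>
#include <vector>

template <typename T>
class BatchQueue {
 public:
  void push(T item) {
    {
      std::lock_guard<std::mutex> lock(mu_);
      items_.push_back(std::move(item));
    }
    cv_.notify_one();
  }

  // Blocks until at least one item is available, then returns up to max_batch.
  std::vector<T> acquire(std::size_t max_batch) {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [&] { return !items_.empty(); });
    std::vector<T> batch;
    while (!items_.empty() && batch.size() < max_batch) {
      batch.push_back(std::move(items_.front()));
      items_.pop_front();
    }
    return batch;
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::deque<T> items_;
};
```
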
@@ -30,4 +30,4 @@ keepalive_interval_seconds = 30
 
 [benchmark]
 # Use original benchmark load for testing
-ok_resolve_iterations = 7000
+ok_resolve_iterations = 4000

@@ -2,7 +2,7 @@
 
 ## Summary
 
-WeaselDB's /ok health check endpoint achieves 1M requests/second with 650ns of configurable CPU work per request through the 4-stage commit pipeline, while maintaining 0% CPU usage when idle. The configurable CPU work serves both as a health check (validating the full pipeline) and as a benchmarking tool for measuring per-request processing capacity.
+WeaselDB's /ok health check endpoint achieves 1M requests/second with 740ns of configurable CPU work per request through the 4-stage commit pipeline, while maintaining 0% CPU usage when idle. The configurable CPU work serves both as a health check (validating the full pipeline) and as a benchmarking tool for measuring per-request processing capacity.
 
 ## Performance Metrics
 

@@ -22,9 +22,9 @@ WeaselDB's /ok health check endpoint achieves 1M requests/second with 650ns of c
 
 **Health Check Pipeline (/ok endpoint)**:
 - **Throughput**: 1.0M requests/second
-- **Configurable CPU work**: 650ns (7000 iterations, validated with nanobench)
+- **Configurable CPU work**: 740ns (4000 iterations, validated with nanobench)
 - **Theoretical maximum CPU time**: 1000ns (1,000,000,000ns ÷ 1,000,000 req/s)
-- **CPU work efficiency**: 65% (650ns ÷ 1000ns)
+- **CPU work efficiency**: 74% (740ns ÷ 1000ns)
 - **Pipeline stages**: Sequence (noop) → Resolve (CPU work) → Persist (response) → Release (cleanup)
 - **CPU usage when idle**: 0%
 

@@ -41,12 +41,9 @@ WeaselDB's /ok health check endpoint achieves 1M requests/second with 650ns of c
 - **Maintained**: 100,000 spin iterations necessary to prevent thread descheduling
 - **Result**: Same throughput with more efficient spinning
 
-**Stage-0 Batch Size Optimization**:
-- **Changed**: Stage-0 max batch size from unlimited to 1
+**Resolve Batch Size Optimization**:
+- **Changed**: Resolve max batch size from unlimited to 1
 - **Mechanism**: Single-item processing checks for work more frequently, keeping the thread in fast coordination paths instead of expensive spin/wait cycles
 - **Profile evidence**: Coordination overhead reduced from ~11% to ~5.6% CPU time
-- **Result**: Additional 12.7% increase in serial CPU budget (488ns → 550ns)
-- **Overall improvement**: 38.9% increase from baseline (396ns → 550ns)
 
 ### Request Flow
 

@@ -56,8 +53,8 @@ I/O Threads (8) → HttpHandler::on_batch_complete() → Commit Pipeline
      ↑                                  ↓
      |                        Stage 0: Sequence (noop)
      |                                  ↓
-     |                        Stage 1: Resolve (650ns CPU work)
-     |                            (spend_cpu_cycles(7000))
+     |                        Stage 1: Resolve (740ns CPU work)
+     |                            (spend_cpu_cycles(4000))
      |                                  ↓
      |                        Stage 2: Persist (generate response)
      |                            (send "OK" response)

@@ -71,8 +68,8 @@ I/O Threads (8) → HttpHandler::on_batch_complete() → Commit Pipeline
 ## Test Configuration
 
 - Server: test_benchmark_config.toml with 8 io_threads, 8 epoll_instances
-- Configuration: `ok_resolve_iterations = 7000` (650ns CPU work)
+- Configuration: `ok_resolve_iterations = 4000` (740ns CPU work)
 - Load tester: targeting /ok endpoint
-- Benchmark validation: ./bench_cpu_work 7000
+- Benchmark validation: ./bench_cpu_work 4000
 - Build: ninja
 - Command: ./weaseldb --config test_benchmark_config.toml