// weaseldb/benchmarks/bench_commit_request.cpp

#include "commit_request.hpp"
#include <nanobench.h>
#include <string>
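
// The benchmarks below exercise the CommitRequest API roughly as follows
// (inferred from usage in this file; commit_request.hpp is authoritative):
//
//   CommitRequest req;                      // or CommitRequest req(arena_size);
//   std::string buf = SIMPLE_JSON;          // parse_json wants a mutable buffer
//   bool ok = req.parse_json(buf.data(), buf.size());
//   req.is_parse_complete();                // query parse state
//   req.reset();                            // clear for reuse
//
// Streaming: begin_streaming_parse(), parse_chunk(ptr, len) -> ParseStatus,
// finish_streaming_parse(). Arena stats: total_allocated(), used_bytes().
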
// Sample JSON strings of varying complexity for benchmarking
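// Note: "key", "value", "begin", and "end" fields below hold base64-encoded
// bytes (e.g. "dGVzdEtleQ==" decodes to "testKey"), which appears to be the
// wire format the parser expects; a base64-heavy benchmark follows at the end
// of main().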
const std::string SIMPLE_JSON = R"({
  "request_id": "simple-test",
  "leader_id": "leader123",
  "read_version": 12345
})";

const std::string MEDIUM_JSON = R"({
  "request_id": "medium-test",
  "leader_id": "leader456",
  "read_version": 98765,
  "preconditions": [
    {
      "type": "point_read",
      "version": 98764,
      "key": "dGVzdEtleQ=="
    },
    {
      "type": "range_read",
      "version": 98763,
      "begin": "cmFuZ2VTdGFydA==",
      "end": "cmFuZ2VFbmQ="
    }
  ],
  "operations": [
    {
      "type": "write",
      "key": "d3JpdGVLZXk=",
      "value": "d3JpdGVWYWx1ZQ=="
    },
    {
      "type": "delete",
      "key": "ZGVsZXRlS2V5"
    }
  ]
})";

const std::string COMPLEX_JSON = R"({
  "request_id": "complex-batch-operation-12345",
  "leader_id": "leader789abcdef",
  "read_version": 999999999,
  "preconditions": [
    {
      "type": "point_read",
      "version": 999999998,
      "key": "cHJlY29uZGl0aW9uS2V5MQ=="
    },
    {
      "type": "range_read",
      "version": 999999997,
      "begin": "cmFuZ2VQcmVjb25kaXRpb25CZWdpbg==",
      "end": "cmFuZ2VQcmVjb25kaXRpb25FbmQ="
    },
    {
      "type": "point_read",
      "version": 999999996,
      "key": "YW5vdGhlclByZWNvbmRpdGlvbktleQ=="
    }
  ],
  "operations": [
    {
      "type": "write",
      "key": "b3BlcmF0aW9uS2V5MQ==",
      "value": "bGFyZ2VPcGVyYXRpb25WYWx1ZVdpdGhMb3RzT2ZEYXRhSGVyZQ=="
    },
    {
      "type": "write",
      "key": "b3BlcmF0aW9uS2V5Mg==",
      "value": "YW5vdGhlckxhcmdlVmFsdWVXaXRoRXZlbk1vcmVEYXRh"
    },
    {
      "type": "delete",
      "key": "ZGVsZXRlT3BlcmF0aW9uS2V5"
    },
    {
      "type": "range_delete",
      "begin": "cmFuZ2VEZWxldGVTdGFydA==",
      "end": "cmFuZ2VEZWxldGVFbmQ="
    },
    {
      "type": "write",
      "key": "ZmluYWxPcGVyYXRpb25LZXk=",
      "value": "ZmluYWxPcGVyYXRpb25WYWx1ZVdpdGhMb25nZXJEYXRhRm9yVGVzdGluZw=="
    }
  ]
})";

// Generate a large JSON with many operations for stress testing
std::string generate_large_json(int num_operations) {
  std::string json = R"({
  "request_id": "large-batch-)" + std::to_string(num_operations) + R"(",
  "leader_id": "stress-test-leader",
  "read_version": 1000000,
  "operations": [)";
  for (int i = 0; i < num_operations; ++i) {
    if (i > 0)
      json += ",";
    json += R"(
    {
      "type": "write",
      "key": ")" + std::string("key") + std::to_string(i) + R"(",
      "value": ")" + std::string("value") + std::to_string(i) + R"("
    })";
  }
  json += R"(
  ]
})";
  return json;
}
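
// For illustration, generate_large_json(1) produces (ignoring whitespace):
//   {"request_id": "large-batch-1", "leader_id": "stress-test-leader",
//    "read_version": 1000000,
//    "operations": [{"type": "write", "key": "key0", "value": "value0"}]}
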
int main() {
  // One-shot parsing benchmarks
  auto bench = ankerl::nanobench::Bench()
                   .title("CommitRequest One-Shot Parsing")
                   .unit("parse")
                   .warmup(100);
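
  // Each bench.run lambda below is the timed body: it constructs a fresh
  // CommitRequest and a fresh mutable copy of the JSON on every iteration,
  // since parse_json takes a non-const buffer (presumably for in-situ
  // parsing), so the string copy is part of the measured cost.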

  // Simple JSON parsing
  bench.run("Simple JSON (3 fields)", [&] {
    CommitRequest request;
    std::string mutable_json = SIMPLE_JSON;
    bool result = request.parse_json(mutable_json.data(), mutable_json.size());
    ankerl::nanobench::doNotOptimizeAway(result);
    ankerl::nanobench::doNotOptimizeAway(request.is_parse_complete());
  });

  // Medium complexity JSON parsing
  bench.run("Medium JSON (2 preconditions, 2 operations)", [&] {
    CommitRequest request;
    std::string mutable_json = MEDIUM_JSON;
    bool result = request.parse_json(mutable_json.data(), mutable_json.size());
    ankerl::nanobench::doNotOptimizeAway(result);
    ankerl::nanobench::doNotOptimizeAway(request.is_parse_complete());
  });

  // Complex JSON parsing
  bench.run("Complex JSON (3 preconditions, 5 operations)", [&] {
    CommitRequest request;
    std::string mutable_json = COMPLEX_JSON;
    bool result = request.parse_json(mutable_json.data(), mutable_json.size());
    ankerl::nanobench::doNotOptimizeAway(result);
    ankerl::nanobench::doNotOptimizeAway(request.is_parse_complete());
  });

  // Large batch operations
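  // generate_large_json runs outside the timed lambda, so string construction
  // is excluded; only the per-iteration copy and parse are measured.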
  for (int num_ops : {10, 50, 100, 500}) {
    std::string large_json = generate_large_json(num_ops);
    bench.run("Large JSON (" + std::to_string(num_ops) + " operations)", [&] {
      CommitRequest request;
      std::string mutable_json = large_json;
      bool result =
          request.parse_json(mutable_json.data(), mutable_json.size());
      ankerl::nanobench::doNotOptimizeAway(result);
      ankerl::nanobench::doNotOptimizeAway(request.is_parse_complete());
    });
  }

  // Streaming parsing benchmarks
  auto streaming_bench = ankerl::nanobench::Bench()
                             .title("CommitRequest Streaming Parsing")
                             .unit("parse")
                             .warmup(50);

  // Streaming with different chunk sizes
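  // Protocol as exercised here: begin_streaming_parse(), then parse_chunk()
  // per slice while it returns ParseStatus::Incomplete, and a final
  // finish_streaming_parse() if the input ran out before the parse completed.
  // Smaller chunks mean more parse_chunk calls (and loop overhead) per parse.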
  for (int chunk_size : {1, 8, 32, 128, 512}) {
    streaming_bench.run(
        "Streaming Medium JSON (chunk size " + std::to_string(chunk_size) + ")",
        [&] {
          CommitRequest request;
          std::string mutable_json = MEDIUM_JSON;
          request.begin_streaming_parse();
          size_t offset = 0;
          CommitRequest::ParseStatus status =
              CommitRequest::ParseStatus::Incomplete;
          while (offset < mutable_json.size() &&
                 status == CommitRequest::ParseStatus::Incomplete) {
            size_t len = std::min(static_cast<size_t>(chunk_size),
                                  mutable_json.size() - offset);
            status = request.parse_chunk(mutable_json.data() + offset, len);
            offset += len;
          }
          if (status == CommitRequest::ParseStatus::Incomplete) {
            status = request.finish_streaming_parse();
          }
          ankerl::nanobench::doNotOptimizeAway(status);
          ankerl::nanobench::doNotOptimizeAway(request.is_parse_complete());
        });
  }

  // Memory allocation efficiency benchmarks
  auto memory_bench = ankerl::nanobench::Bench()
                          .title("CommitRequest Memory Usage")
                          .unit("allocation")
                          .warmup(50);

  // Different arena sizes
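  // The size passed to the constructor presumably pre-sizes CommitRequest's
  // internal arena; total_allocated() and used_bytes() are read afterwards so
  // the allocation bookkeeping cannot be optimized away.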
  for (size_t arena_size : {1024, 4096, 16384, 65536}) {
    memory_bench.run(
        "Arena size " + std::to_string(arena_size) + " bytes", [&] {
          CommitRequest request(arena_size);
          std::string mutable_json = COMPLEX_JSON;
          bool result =
              request.parse_json(mutable_json.data(), mutable_json.size());
          ankerl::nanobench::doNotOptimizeAway(result);
          ankerl::nanobench::doNotOptimizeAway(request.total_allocated());
          ankerl::nanobench::doNotOptimizeAway(request.used_bytes());
        });
  }

  // Reset and reuse benchmarks
  auto reuse_bench = ankerl::nanobench::Bench()
                         .title("CommitRequest Reset and Reuse")
                         .unit("operation")
                         .warmup(50);
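
  // reset() presumably clears parsed state while keeping the arena, so this
  // measures the steady-state parse -> reset -> parse cost on a long-lived
  // request object rather than construction plus parse.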
  reuse_bench.run("Parse -> Reset -> Parse cycle", [&] {
    static CommitRequest request; // Static to persist across invocations
    std::string mutable_json1 = SIMPLE_JSON;
    bool result1 =
        request.parse_json(mutable_json1.data(), mutable_json1.size());
    request.reset();
    std::string mutable_json2 = MEDIUM_JSON;
    bool result2 =
        request.parse_json(mutable_json2.data(), mutable_json2.size());
    ankerl::nanobench::doNotOptimizeAway(result1);
    ankerl::nanobench::doNotOptimizeAway(result2);
    ankerl::nanobench::doNotOptimizeAway(request.is_parse_complete());
  });

  // Base64 decoding performance
  auto base64_bench = ankerl::nanobench::Bench()
                          .title("Base64 Decoding Performance")
                          .unit("decode")
                          .warmup(50);

  // JSON with lots of base64 encoded data
  std::string base64_heavy_json = R"({
  "leader_id": "base64-test-leader",
  "read_version": 12345,
  "operations": [)";
  for (int i = 0; i < 20; ++i) {
    if (i > 0)
      base64_heavy_json += ",";
    base64_heavy_json += R"(
    {
      "type": "write",
      "key": "VGhpc0lzQUxvbmdCYXNlNjRFbmNvZGVkS2V5V2l0aExvdHNPZkRhdGFGb3JUZXN0aW5nUHVycG9zZXM=",
      "value": "VGhpc0lzQW5FdmVuTG9uZ2VyQmFzZTY0RW5jb2RlZFZhbHVlV2l0aEV2ZW5Nb3JlRGF0YUZvclRlc3RpbmdUaGVCYXNlNjREZWNvZGluZ1BlcmZvcm1hbmNlT2ZUaGVQYXJzZXI="
    })";
  }
  base64_heavy_json += R"(
  ]
})";

  base64_bench.run(
      "Heavy Base64 JSON (20 operations with long encoded data)", [&] {
        CommitRequest request;
        std::string mutable_json = base64_heavy_json;
        bool result =
            request.parse_json(mutable_json.data(), mutable_json.size());
        ankerl::nanobench::doNotOptimizeAway(result);
        ankerl::nanobench::doNotOptimizeAway(request.is_parse_complete());
      });

  return 0;
}