Measure per byte in benchmarks and remove some of them

2025-08-17 14:40:57 -04:00
parent 05ee8e05f8
commit 032a4184cc
2 changed files with 20 additions and 201 deletions


@@ -13,9 +13,10 @@ int main() {
   // One-shot parsing benchmarks
   auto bench = ankerl::nanobench::Bench()
                    .title("CommitRequest One-Shot Parsing")
-                   .unit("parse")
+                   .unit("byte")
                    .warmup(100);
+  bench.batch(SIMPLE_JSON.size());
   // Simple JSON parsing
   bench.run("Simple JSON (3 fields)", [&] {
     CommitRequest request;
@@ -27,6 +28,7 @@ int main() {
     ankerl::nanobench::doNotOptimizeAway(request.leader_id());
   });
+  bench.batch(MEDIUM_JSON.size());
   // Medium complexity JSON parsing
   bench.run("Medium JSON (2 preconditions, 2 operations)", [&] {
     CommitRequest request;
@@ -38,6 +40,7 @@ int main() {
     ankerl::nanobench::doNotOptimizeAway(request.leader_id());
   });
+  bench.batch(COMPLEX_JSON.size());
   // Complex JSON parsing
   bench.run("Complex JSON (3 preconditions, 5 operations)", [&] {
     CommitRequest request;
@@ -52,6 +55,7 @@ int main() {
   // Large batch operations
   for (int num_ops : {10, 50, 100, 500}) {
     std::string large_json = generate_large_json(num_ops);
+    bench.batch(large_json.size());
     bench.run("Large JSON (" + std::to_string(num_ops) + " operations)", [&] {
       CommitRequest request;
       JsonCommitRequestParser parser;
@@ -66,7 +70,8 @@ int main() {
   // Streaming parsing benchmarks
   auto streaming_bench = ankerl::nanobench::Bench()
                              .title("CommitRequest Streaming Parsing")
-                             .unit("parse")
+                             .unit("byte")
+                             .batch(MEDIUM_JSON.size())
                              .warmup(50);
   // Streaming with different chunk sizes
@@ -102,67 +107,5 @@ int main() {
     });
   }
-  // Reset and reuse benchmarks
-  auto reuse_bench = ankerl::nanobench::Bench()
-                         .title("CommitRequest Reset and Reuse")
-                         .unit("operation")
-                         .warmup(50);
-  reuse_bench.run("Parse -> Reset -> Parse cycle", [&] {
-    static CommitRequest request; // Static to persist across invocations
-    static JsonCommitRequestParser parser;
-    std::string mutable_json1 = SIMPLE_JSON;
-    bool result1 =
-        parser.parse(request, mutable_json1.data(), mutable_json1.size());
-    request.reset();
-    std::string mutable_json2 = MEDIUM_JSON;
-    bool result2 =
-        parser.parse(request, mutable_json2.data(), mutable_json2.size());
-    ankerl::nanobench::doNotOptimizeAway(result1);
-    ankerl::nanobench::doNotOptimizeAway(result2);
-    ankerl::nanobench::doNotOptimizeAway(request.leader_id());
-  });
-  // Base64 decoding performance
-  auto base64_bench = ankerl::nanobench::Bench()
-                          .title("Base64 Decoding Performance")
-                          .unit("decode")
-                          .warmup(50);
-  // JSON with lots of base64 encoded data
-  std::string base64_heavy_json = R"({
-    "leader_id": "base64-test-leader",
-    "read_version": 12345,
-    "operations": [)";
-  for (int i = 0; i < 20; ++i) {
-    if (i > 0)
-      base64_heavy_json += ",";
-    base64_heavy_json += R"(
-      {
-        "type": "write",
-        "key": "VGhpc0lzQUxvbmdCYXNlNjRFbmNvZGVkS2V5V2l0aExvdHNPZkRhdGFGb3JUZXN0aW5nUHVycG9zZXM=",
-        "value": "VGhpc0lzQW5FdmVuTG9uZ2VyQmFzZTY0RW5jb2RlZFZhbHVlV2l0aEV2ZW5Nb3JlRGF0YUZvclRlc3RpbmdUaGVCYXNlNjREZWNvZGluZ1BlcmZvcm1hbmNlT2ZUaGVQYXJzZXI="
-      })";
-  }
-  base64_heavy_json += R"(
-  ]
-})";
-  base64_bench.run(
-      "Heavy Base64 JSON (20 operations with long encoded data)", [&] {
-        CommitRequest request;
-        JsonCommitRequestParser parser;
-        std::string mutable_json = base64_heavy_json;
-        bool result =
-            parser.parse(request, mutable_json.data(), mutable_json.size());
-        ankerl::nanobench::doNotOptimizeAway(result);
-        ankerl::nanobench::doNotOptimizeAway(request.leader_id());
-      });
   return 0;
 }
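
Note on the unit change: switching .unit("parse") to .unit("byte") only changes how nanobench reports results when it is paired with batch(). nanobench divides each iteration's measured time by the batch size, so setting batch(json.size()) turns the output into ns/byte and bytes/s rather than ns/parse, which makes payloads of different sizes comparable. Below is a minimal, self-contained sketch of that pairing, not part of the commit: the payload string and the string-copy workload are placeholders for the repository's JSON fixtures and JsonCommitRequestParser::parse().

// Sketch only: placeholder payload and workload, real nanobench API.
#define ANKERL_NANOBENCH_IMPLEMENT
#include <nanobench.h>

#include <string>

int main() {
  // Placeholder payload; the real benchmarks use fixed JSON fixtures.
  std::string payload = R"({"leader_id":"node-1","read_version":42})";

  ankerl::nanobench::Bench()
      .title("Per-byte measurement sketch")
      .unit("byte")           // report results per byte, not per iteration
      .batch(payload.size())  // each run() iteration processes this many bytes
      .warmup(100)
      .run("copy payload", [&] {
        // Stand-in workload: copy the buffer so the measured body is not empty.
        std::string copy = payload;
        ankerl::nanobench::doNotOptimizeAway(copy.data());
      });
  return 0;
}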