#include "commit_request.hpp" #include "json_commit_request_parser.hpp" #include "test_data.hpp" #include #include using namespace weaseldb::test_data; // JSON test data is now provided by test_data.hpp int main() { // One-shot parsing benchmarks auto bench = ankerl::nanobench::Bench() .title("CommitRequest One-Shot Parsing") .unit("parse") .warmup(100); // Simple JSON parsing bench.run("Simple JSON (3 fields)", [&] { CommitRequest request; JsonCommitRequestParser parser; std::string mutable_json = SIMPLE_JSON; bool result = parser.parse(request, mutable_json.data(), mutable_json.size()); ankerl::nanobench::doNotOptimizeAway(result); ankerl::nanobench::doNotOptimizeAway(request.leader_id()); }); // Medium complexity JSON parsing bench.run("Medium JSON (2 preconditions, 2 operations)", [&] { CommitRequest request; JsonCommitRequestParser parser; std::string mutable_json = MEDIUM_JSON; bool result = parser.parse(request, mutable_json.data(), mutable_json.size()); ankerl::nanobench::doNotOptimizeAway(result); ankerl::nanobench::doNotOptimizeAway(request.leader_id()); }); // Complex JSON parsing bench.run("Complex JSON (3 preconditions, 5 operations)", [&] { CommitRequest request; JsonCommitRequestParser parser; std::string mutable_json = COMPLEX_JSON; bool result = parser.parse(request, mutable_json.data(), mutable_json.size()); ankerl::nanobench::doNotOptimizeAway(result); ankerl::nanobench::doNotOptimizeAway(request.leader_id()); }); // Large batch operations for (int num_ops : {10, 50, 100, 500}) { std::string large_json = generate_large_json(num_ops); bench.run("Large JSON (" + std::to_string(num_ops) + " operations)", [&] { CommitRequest request; JsonCommitRequestParser parser; std::string mutable_json = large_json; bool result = parser.parse(request, mutable_json.data(), mutable_json.size()); ankerl::nanobench::doNotOptimizeAway(result); ankerl::nanobench::doNotOptimizeAway(request.leader_id()); }); } // Streaming parsing benchmarks auto streaming_bench = ankerl::nanobench::Bench() .title("CommitRequest Streaming Parsing") .unit("parse") .warmup(50); // Streaming with different chunk sizes for (int chunk_size : {1, 8, 32, 128, 512}) { streaming_bench.run( "Streaming Medium JSON (chunk size " + std::to_string(chunk_size) + ")", [&] { CommitRequest request; JsonCommitRequestParser parser; std::string mutable_json = MEDIUM_JSON; parser.begin_streaming_parse(request); size_t offset = 0; CommitRequestParser::ParseStatus status = CommitRequestParser::ParseStatus::Incomplete; while (offset < mutable_json.size() && status == CommitRequestParser::ParseStatus::Incomplete) { size_t len = std::min(static_cast(chunk_size), mutable_json.size() - offset); status = parser.parse_chunk(request, mutable_json.data() + offset, len); offset += len; } if (status == CommitRequestParser::ParseStatus::Incomplete) { status = parser.finish_streaming_parse(request); } ankerl::nanobench::doNotOptimizeAway(status); ankerl::nanobench::doNotOptimizeAway(request.leader_id()); }); } // Reset and reuse benchmarks auto reuse_bench = ankerl::nanobench::Bench() .title("CommitRequest Reset and Reuse") .unit("operation") .warmup(50); reuse_bench.run("Parse -> Reset -> Parse cycle", [&] { static CommitRequest request; // Static to persist across invocations static JsonCommitRequestParser parser; std::string mutable_json1 = SIMPLE_JSON; bool result1 = parser.parse(request, mutable_json1.data(), mutable_json1.size()); request.reset(); std::string mutable_json2 = MEDIUM_JSON; bool result2 = parser.parse(request, 
mutable_json2.data(), mutable_json2.size()); ankerl::nanobench::doNotOptimizeAway(result1); ankerl::nanobench::doNotOptimizeAway(result2); ankerl::nanobench::doNotOptimizeAway(request.leader_id()); }); // Base64 decoding performance auto base64_bench = ankerl::nanobench::Bench() .title("Base64 Decoding Performance") .unit("decode") .warmup(50); // JSON with lots of base64 encoded data std::string base64_heavy_json = R"({ "leader_id": "base64-test-leader", "read_version": 12345, "operations": [)"; for (int i = 0; i < 20; ++i) { if (i > 0) base64_heavy_json += ","; base64_heavy_json += R"( { "type": "write", "key": "VGhpc0lzQUxvbmdCYXNlNjRFbmNvZGVkS2V5V2l0aExvdHNPZkRhdGFGb3JUZXN0aW5nUHVycG9zZXM=", "value": "VGhpc0lzQW5FdmVuTG9uZ2VyQmFzZTY0RW5jb2RlZFZhbHVlV2l0aEV2ZW5Nb3JlRGF0YUZvclRlc3RpbmdUaGVCYXNlNjREZWNvZGluZ1BlcmZvcm1hbmNlT2ZUaGVQYXJzZXI=" })"; } base64_heavy_json += R"( ] })"; base64_bench.run( "Heavy Base64 JSON (20 operations with long encoded data)", [&] { CommitRequest request; JsonCommitRequestParser parser; std::string mutable_json = base64_heavy_json; bool result = parser.parse(request, mutable_json.data(), mutable_json.size()); ankerl::nanobench::doNotOptimizeAway(result); ankerl::nanobench::doNotOptimizeAway(request.leader_id()); }); return 0; }