#include "config.hpp"
|
|
#include "connection.hpp"
|
|
#include "http_handler.hpp"
|
|
#include "server.hpp"
|
|
|
|
#include <chrono>
|
|
#include <doctest/doctest.h>
|
|
#include <fcntl.h>
|
|
#include <poll.h>
|
|
#include <string>
|
|
#include <thread>
|
|
#include <unistd.h>
|
|
|
|
// Test to demonstrate HTTP pipelining response ordering issue
|
|
//
|
|
// HTTP/1.1 pipelining allows multiple requests to be sent on a single
|
|
// connection without waiting for responses, but responses MUST be sent in the
|
|
// same order as requests were received (RFC 2616 Section 8.1.2.2).
|
|
//
|
|
// This test sends two pipelined requests:
|
|
// 1. GET /ok - Slow response (goes through 4-stage pipeline processing)
|
|
// 2. GET /metrics - Fast response (handled directly, just collects metrics)
|
|
//
|
|
// Even though /ok takes longer to process due to pipeline overhead, the /ok
|
|
// response should be sent first since it was requested first. Currently this
|
|
// test FAILS because the faster /metrics response completes before /ok and
|
|
// gets sent out of order.
|
|
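//
// On a correctly behaving server, the first "HTTP/1.1" status line on the
// wire belongs to the /ok response, which the assertions below identify by
// its "Content-Length: 2" header; the /metrics response follows it. The
// failure mode demonstrated here is the reverse: the /metrics bytes arrive
// first.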
TEST_CASE("HTTP pipelined responses out of order") {
|
|
weaseldb::Config config;
|
|
HttpHandler handler(config);
|
|
auto server = Server::create(config, handler, {});
|
|
int fd = server->create_local_connection();
|
|
|
|
auto runThread = std::thread{[&]() { server->run(); }};
|
|
|
|
// Send two pipelined requests in a single write() call
|
|
// Request order: /ok first, then /metrics
|
|
// Expected response order: /ok response first, then /metrics response
|
|
// Actual result: /metrics response first (fast), then /ok response (slow)
|
|
std::string pipelined_requests = "GET /ok HTTP/1.1\r\n"
|
|
"Host: localhost\r\n"
|
|
"Connection: keep-alive\r\n"
|
|
"\r\n"
|
|
"GET /metrics HTTP/1.1\r\n"
|
|
"Host: localhost\r\n"
|
|
"Connection: keep-alive\r\n"
|
|
"\r\n";
|
|
|
|
int w = write(fd, pipelined_requests.c_str(), pipelined_requests.size());
|
|
REQUIRE(w == static_cast<int>(pipelined_requests.size()));
|
|
|
|
// Set socket to non-blocking
|
|
int flags = fcntl(fd, F_GETFL, 0);
|
|
fcntl(fd, F_SETFL, flags | O_NONBLOCK);
|
|
|
|
// Read all responses with non-blocking I/O and poll
|
|
char buf[8192];
|
|
int total_read = 0;
|
|
|
|
bool found_ok = false;
|
|
bool found_http_response = false;
|
|
|
|
std::string ok_response_header = "Content-Length: 2";
|
|
|
|
while (true) {
|
|
// Use poll to wait for data availability
|
|
struct pollfd pfd = {fd, POLLIN, 0};
|
|
int poll_result = poll(&pfd, 1, -1); // Block indefinitely
|
|
|
|
if (poll_result > 0 && (pfd.revents & POLLIN)) {
|
|
int r = read(fd, buf + total_read, sizeof(buf) - total_read - 1);
|
|
if (r > 0) {
|
|
printf("%.*s", r, buf + total_read);
|
|
total_read += r;
|
|
|
|
// Check if we have what we need after each read
|
|
buf[total_read] = '\0';
|
|
std::string current_data(buf, total_read);
|
|
|
|
found_http_response =
|
|
current_data.find("HTTP/1.1") != std::string::npos;
|
|
found_ok = current_data.find(ok_response_header) != std::string::npos;
|
|
|
|
// If we have both HTTP response and ok_response_header, we can proceed
|
|
// with the test
|
|
if (found_http_response && found_ok) {
|
|
break;
|
|
}
|
|
} else if (r == 0) {
|
|
REQUIRE(false);
|
|
break; // EOF
|
|
} else if (errno != EAGAIN && errno != EWOULDBLOCK) {
|
|
REQUIRE(false);
|
|
}
|
|
}
|
|
}
|
|
|
|
buf[total_read] = '\0';
|
|
std::string response_data(buf, total_read);
|
|
|
|
// Ensure we found both HTTP response and ok_response_header
|
|
REQUIRE(found_http_response);
|
|
REQUIRE(found_ok);
|
|
|
|
// Find first occurrence of ok_response_header in response body
|
|
std::size_t ok_pos = response_data.find(ok_response_header);
|
|
REQUIRE(ok_pos != std::string::npos);
|
|
|
|
// Count HTTP response status lines before the /ok response body
|
|
// This tests response ordering: should be exactly 1 (the /ok response itself)
|
|
std::string before_ok = response_data.substr(0, ok_pos);
|
|
int http_response_count = 0;
|
|
std::size_t pos = 0;
|
|
while ((pos = before_ok.find("HTTP/1.1", pos)) != std::string::npos) {
|
|
http_response_count++;
|
|
pos += 8;
|
|
}
|
|
|
|
// Assert there's exactly one HTTP response line before /ok response body
|
|
// If http_response_count == 2, it means /metrics response came first (wrong
|
|
// order) If http_response_count == 1, it means /ok response came first
|
|
// (correct order)
|
|
CHECK(http_response_count == 1);
|
|
|
|
close(fd);
|
|
server->shutdown();
|
|
runThread.join();
|
|
}
|
|
|
|
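// Both pipelining tests in this file count responses by scanning the raw byte
// stream for "HTTP/1.1" status-line prefixes. The helper below is a minimal
// sketch of that scan; it is not wired into the tests (the name is
// illustrative only), but it captures the counting logic the assertions rely
// on.
[[maybe_unused]] static int count_status_lines(const std::string &data) {
  // Each response begins with an "HTTP/1.1" status line; count the prefixes.
  int count = 0;
  std::size_t pos = 0;
  while ((pos = data.find("HTTP/1.1", pos)) != std::string::npos) {
    ++count;
    pos += 8; // Skip past the matched prefix before searching again.
  }
  return count;
}
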
TEST_CASE("HTTP pipelined POST requests race condition") {
|
|
weaseldb::Config config;
|
|
HttpHandler handler(config);
|
|
auto server = Server::create(config, handler, {});
|
|
int fd = server->create_local_connection();
|
|
|
|
auto runThread = std::thread{[&]() { server->run(); }};
|
|
|
|
// Create a POST request with JSON body that requires parsing
|
|
std::string json_body = R"({
|
|
"request_id": "test-123",
|
|
"leader_id": "leader-1",
|
|
"read_version": 1,
|
|
"preconditions": [],
|
|
"operations": [{"write": {"key": "dGVzdA==", "value": "dmFsdWU="}}]
|
|
})";
|
|
|
|
std::string first_post = "POST /v1/commit HTTP/1.1\r\n"
|
|
"Host: localhost\r\n"
|
|
"Content-Type: application/json\r\n"
|
|
"Content-Length: " +
|
|
std::to_string(json_body.size()) +
|
|
"\r\n"
|
|
"Connection: keep-alive\r\n"
|
|
"\r\n" +
|
|
json_body;
|
|
|
|
std::string second_get = "GET /v1/version HTTP/1.1\r\n"
|
|
"Host: localhost\r\n"
|
|
"Connection: close\r\n"
|
|
"\r\n";
|
|
|
|
// Send POST request followed immediately by GET request
|
|
// This creates a scenario where the GET request starts parsing
|
|
// while the POST response is being written (triggering the reset)
|
|
int w1 = write(fd, first_post.c_str(), first_post.size());
|
|
REQUIRE(w1 == static_cast<int>(first_post.size()));
|
|
|
|
int w2 = write(fd, second_get.c_str(), second_get.size());
|
|
REQUIRE(w2 == static_cast<int>(second_get.size()));
|
|
|
|
// Read responses using blocking I/O (deterministic synchronization)
|
|
char buf[4096];
|
|
int total_read = 0;
|
|
int responses_found = 0;
|
|
|
|
while (total_read < 4000) {
|
|
int r = read(fd, buf + total_read, sizeof(buf) - total_read - 1);
|
|
if (r <= 0)
|
|
break;
|
|
total_read += r;
|
|
|
|
buf[total_read] = '\0';
|
|
std::string response(buf, total_read);
|
|
std::size_t pos = 0;
|
|
while ((pos = response.find("HTTP/1.1", pos)) != std::string::npos) {
|
|
responses_found++;
|
|
pos += 8;
|
|
}
|
|
|
|
if (responses_found >= 2)
|
|
break;
|
|
}
|
|
|
|
// Should get responses to both requests
|
|
// Race condition might cause parsing errors or connection issues
|
|
CHECK(responses_found >= 1); // At minimum should handle first request
|
|
|
|
close(fd);
|
|
server->shutdown();
|
|
runThread.join();
|
|
}
|
|
|
|
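// The blocking read loops in these tests bound themselves by byte count. A
// time-bounded alternative is sketched below using poll() with a millisecond
// timeout; the helper is illustrative only (hypothetical name, not used by
// the tests in this file).
[[maybe_unused]] static int read_with_timeout(int fd, char *buf, int capacity,
                                              int timeout_ms) {
  // Wait until the fd is readable or the timeout expires, then do one read().
  struct pollfd pfd = {fd, POLLIN, 0};
  int ready = poll(&pfd, 1, timeout_ms);
  if (ready <= 0) {
    return -1; // Timed out (0) or poll error (-1); nothing was read.
  }
  return static_cast<int>(read(fd, buf, static_cast<std::size_t>(capacity)));
}
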
TEST_CASE("HTTP URL split across multiple writes") {
|
|
weaseldb::Config config;
|
|
HttpHandler handler(config);
|
|
auto server = Server::create(config, handler, {});
|
|
int fd = server->create_local_connection();
|
|
|
|
auto runThread = std::thread{[&]() { server->run(); }};
|
|
|
|
// Test URL accumulation by splitting the URL across multiple writes
|
|
// This would have caught the original bug where URL string_view pointed
|
|
// to llhttp's internal buffer that gets reused between writes
|
|
|
|
// Split "GET /metrics HTTP/1.1\r\n" across multiple writes
|
|
std::string part1 = "GET /met";
|
|
std::string part2 = "rics HTTP/1.1\r\n";
|
|
std::string headers = "Host: localhost\r\n"
|
|
"Connection: close\r\n"
|
|
"\r\n";
|
|
|
|
// Write URL in two parts - this tests URL accumulation
|
|
int w1 = write(fd, part1.c_str(), part1.size());
|
|
REQUIRE(w1 == static_cast<int>(part1.size()));
|
|
|
|
// Attempt to trigger separate llhttp parsing calls
|
|
std::this_thread::sleep_for(std::chrono::milliseconds(1));
|
|
|
|
int w2 = write(fd, part2.c_str(), part2.size());
|
|
REQUIRE(w2 == static_cast<int>(part2.size()));
|
|
|
|
int w3 = write(fd, headers.c_str(), headers.size());
|
|
REQUIRE(w3 == static_cast<int>(headers.size()));
|
|
|
|
// Read response
|
|
char buf[4096];
|
|
int total_read = 0;
|
|
bool found_metrics_response = false;
|
|
|
|
while (total_read < 4000) {
|
|
int r = read(fd, buf + total_read, sizeof(buf) - total_read - 1);
|
|
if (r <= 0)
|
|
break;
|
|
total_read += r;
|
|
|
|
buf[total_read] = '\0';
|
|
std::string response(buf, total_read);
|
|
|
|
// Check for successful metrics response (not 404)
|
|
if (response.find("HTTP/1.1 200 OK") != std::string::npos &&
|
|
response.find("text/plain; version=0.0.4") != std::string::npos) {
|
|
found_metrics_response = true;
|
|
break;
|
|
}
|
|
|
|
// Check for 404 which would indicate URL accumulation failed
|
|
if (response.find("HTTP/1.1 404") != std::string::npos) {
|
|
FAIL("Got 404 - URL accumulation failed, split URL was not properly "
|
|
"reconstructed");
|
|
}
|
|
}
|
|
|
|
REQUIRE(found_metrics_response);
|
|
|
|
close(fd);
|
|
server->shutdown();
|
|
runThread.join();
|
|
}
|
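
// The "URL split across multiple writes" test above guards against keeping a
// string_view into the parser's transient input buffer. The sketch below
// shows the general accumulation approach that avoids that class of bug:
// copy the URL bytes handed to the parser's on-URL callback into an owned
// std::string across calls. The type and member names here are illustrative
// only, not the actual HttpHandler API, and the (data, length) callback shape
// is an assumption modeled on llhttp-style data callbacks.
struct UrlAccumulatorSketch {
  std::string url; // Owns the bytes; later parser input cannot invalidate it.

  // Called once per URL fragment; the parser may deliver the URL in several
  // fragments when the request line is split across reads.
  void on_url_chunk(const char *at, std::size_t length) {
    url.append(at, length);
  }

  // Called when the request line is complete; the accumulated URL is stable
  // and can be routed on (e.g. url == "/metrics").
  void on_url_complete() {}
};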