Probably going to merge accept and network threads

This commit is contained in:
2025-08-20 16:24:09 -04:00
parent 24a1157f0d
commit 7e28e6503d
6 changed files with 166 additions and 72 deletions

View File

@@ -317,49 +317,10 @@ void Server::start_network_threads() {
continue; // Connection closed - unique_ptr destructor cleans up
}
if (events[i].events & EPOLLIN) {
auto buf_size = config_.server.read_buffer_size;
char buf[buf_size];
std::string_view data = conn->readBytes(buf, buf_size);
if (data.empty()) {
// No data, error, or EOF - connection should be closed
continue;
}
// Call handler with unique_ptr - handler can take ownership if
// needed
handler_.on_data_arrived(data, conn);
// If handler took ownership (conn is now null), don't continue
// processing
if (!conn) {
continue;
}
}
// Send immediately if we already have outgoing messages from
// read callbacks.
if ((events[i].events & EPOLLOUT) ||
((events[i].events & EPOLLIN) && conn->hasMessages())) {
bool error = conn->writeBytes();
if (error) {
continue;
}
// Call handler with unique_ptr - handler can take ownership if
// needed
handler_.on_write_progress(conn);
// If handler took ownership (conn is now null), don't continue
// processing
if (!conn) {
continue;
}
// Check if we should close the connection according to application
if (!conn->hasMessages() && conn->closeConnection_) {
continue;
}
// Process I/O using shared helper function
bool should_continue = process_connection_io(conn, events[i].events);
if (!should_continue || !conn) {
continue; // Connection closed or handler took ownership
}
batch[batch_count++] = std::move(conn);
}
@@ -421,7 +382,10 @@ void Server::start_accept_threads() {
}
if (events[i].data.fd == listen_sockfd_) {
// Accept new connections
// Accept new connections and batch them
std::unique_ptr<Connection> batch[config_.server.event_batch_size];
int batch_count = 0;
for (;;) {
struct sockaddr_storage addr;
socklen_t addrlen = sizeof(addr);
@@ -457,19 +421,86 @@ void Server::start_accept_threads() {
connection_id_.fetch_add(1, std::memory_order_relaxed),
&handler_, weak_from_this());
// Transfer to network epoll
struct epoll_event event{};
event.events = EPOLLIN | EPOLLONESHOT | EPOLLRDHUP;
conn->tsan_release();
Connection *raw_conn = conn.release();
event.data.ptr = raw_conn;
// Try to process I/O once in the accept thread before handing off
// to network threads
bool should_continue = process_connection_io(conn, EPOLLIN);
if (epoll_ctl(network_epollfd_, EPOLL_CTL_ADD, fd, &event) ==
-1) {
perror("epoll_ctl ADD");
delete raw_conn;
if (!should_continue) {
// Connection should be closed (error or application decision)
continue;
}
// Add to batch if we still have the connection
if (conn) {
batch[batch_count++] = std::move(conn);
// If batch is full, process it and start a new batch
if (batch_count >= config_.server.event_batch_size) {
handler_.on_post_batch({batch, (size_t)batch_count});
// Transfer all batched connections to network epoll
for (int j = 0; j < batch_count; ++j) {
auto &batched_conn = batch[j];
if (!batched_conn)
continue;
struct epoll_event event{};
// Determine next epoll interest based on whether we have
// messages to send
if (!batched_conn->hasMessages()) {
event.events = EPOLLIN | EPOLLONESHOT | EPOLLRDHUP;
} else {
event.events = EPOLLOUT | EPOLLONESHOT | EPOLLRDHUP;
}
int fd = batched_conn->getFd();
batched_conn->tsan_release();
Connection *raw_conn = batched_conn.release();
event.data.ptr = raw_conn;
if (epoll_ctl(network_epollfd_, EPOLL_CTL_ADD, fd,
&event) == -1) {
perror("epoll_ctl ADD");
delete raw_conn;
continue;
}
}
batch_count = 0; // Reset batch
}
}
}
// Process any remaining connections in the batch
if (batch_count > 0) {
handler_.on_post_batch({batch, (size_t)batch_count});
// Transfer remaining batched connections to network epoll
for (int j = 0; j < batch_count; ++j) {
auto &batched_conn = batch[j];
if (!batched_conn)
continue;
struct epoll_event event{};
// Determine next epoll interest based on whether we have
// messages to send
if (!batched_conn->hasMessages()) {
event.events = EPOLLIN | EPOLLONESHOT | EPOLLRDHUP;
} else {
event.events = EPOLLOUT | EPOLLONESHOT | EPOLLRDHUP;
}
int fd = batched_conn->getFd();
batched_conn->tsan_release();
Connection *raw_conn = batched_conn.release();
event.data.ptr = raw_conn;
if (epoll_ctl(network_epollfd_, EPOLL_CTL_ADD, fd, &event) ==
-1) {
perror("epoll_ctl ADD");
delete raw_conn;
continue;
}
}
}
}
}
@@ -478,6 +509,59 @@ void Server::start_accept_threads() {
}
}
// Run one round of I/O on `conn` for the given epoll `events` bitmask.
// Shared helper used by both the accept thread (speculative first read)
// and the network threads.
//
// Ownership: `conn` is taken by reference because the handler callbacks
// (on_data_arrived / on_write_progress) may steal the connection, leaving
// the unique_ptr null. Callers must re-check `conn` after this returns.
//
// Returns:
//   false - the connection should be closed (read/write error, EOF, or
//           the application requested close and all output was flushed);
//           the caller lets the unique_ptr destructor clean up.
//   true  - processing should continue. This includes the case where a
//           handler took ownership (conn is null on return).
bool Server::process_connection_io(std::unique_ptr<Connection> &conn,
int events) {
// Handle EPOLLIN - read data and process it
if (events & EPOLLIN) {
auto buf_size = config_.server.read_buffer_size;
// NOTE(review): runtime-sized stack array is a VLA - a compiler
// extension, not standard C++. A large configured read_buffer_size
// risks stack overflow; consider a reusable heap buffer. TODO confirm.
char buf[buf_size];
int r = conn->readBytes(buf, buf_size);
if (r < 0) {
// Error or EOF - connection should be closed
return false;
}
if (r == 0) {
// No data available (EAGAIN) - skip read processing but continue.
// NOTE(review): this also skips the write phase below even if the
// caller passed EPOLLOUT in the same mask - presumably fine under
// EPOLLONESHOT re-arming with a single interest; verify.
return true;
}
// Call handler with unique_ptr - handler can take ownership if needed
handler_.on_data_arrived(std::string_view{buf, size_t(r)}, conn);
// If handler took ownership (conn is now null), return true to indicate
// processing is done
if (!conn) {
return true;
}
}
// Send immediately if we have outgoing messages (either from EPOLLOUT or
// after reading)
if ((events & EPOLLOUT) || ((events & EPOLLIN) && conn->hasMessages())) {
bool error = conn->writeBytes();
if (error) {
return false; // Connection should be closed
}
// Call handler with unique_ptr - handler can take ownership if needed
handler_.on_write_progress(conn);
// If handler took ownership (conn is now null), return true to indicate
// processing is done
if (!conn) {
return true;
}
// Check if we should close the connection according to application:
// only close once the outgoing queue has fully drained.
if (!conn->hasMessages() && conn->closeConnection_) {
return false; // Connection should be closed
}
}
return true; // Connection should continue
}
void Server::cleanup_resources() {
if (shutdown_pipe_[0] != -1) {
close(shutdown_pipe_[0]);