Revert "buffer has to be prepared in advance anyways so set flag there"

This reverts commit caf0fa84af.
Author: J. Nick Koston
Date:   2026-02-05 11:19:28 +01:00
Commit: e5bd6865ca
Parent: 87dc930dcf
2 changed files with 18 additions and 24 deletions
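
For orientation, the net effect of the revert: callers no longer prepare the shared buffer in advance; instead dispatch_message_ regains its batch_first parameter, sets flags_.batch_first_message, and encode_message_to_buffer prepares the buffer lazily for the first message of a batch. Below is a minimal compilable sketch of that restored shape, using invented simplified types (Conn, dispatch_message) rather than the real ESPHome classes:

#include <cstddef>
#include <cstdint>
#include <vector>

// Invented stand-in for APIConnection; names simplified for illustration.
struct Conn {
  bool batch_first_message = false;
  std::vector<uint8_t> shared_buf;

  // Mirrors the restored three-argument prepare_first_message_buffer():
  // one reservation for the whole batch, then leave room for the frame header.
  void prepare_first_message_buffer(uint8_t header_padding, size_t total_size) {
    shared_buf.clear();
    shared_buf.reserve(total_size);
    shared_buf.resize(header_padding);
  }

  // Sketch of encode_message_to_buffer(): only the first message of a batch
  // (or a single message) prepares the buffer; later messages append after it.
  void encode_message(uint8_t header_padding, size_t total_calculated_size) {
    if (batch_first_message) {
      prepare_first_message_buffer(header_padding, total_calculated_size);
      batch_first_message = false;
    }
    // ... the real code encodes the protobuf payload into shared_buf here ...
  }

  // Sketch of the restored dispatch_message_() contract: the caller states
  // whether this is the first message, and the flag is set here.
  void dispatch_message(bool batch_first, uint8_t header_padding, size_t size) {
    batch_first_message = batch_first;
    encode_message(header_padding, size);
  }
};

int main() {
  Conn conn;
  conn.dispatch_message(/*batch_first=*/true, /*header_padding=*/6, /*size=*/64);
  conn.dispatch_message(/*batch_first=*/false, 6, 64);  // appends, no re-prepare
}

The design point the revert restores: only the first dispatch pays for clear/reserve/resize; subsequent batch messages append directly after the previous payload.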

View File

@@ -331,7 +331,8 @@ uint16_t APIConnection::encode_message_to_buffer(ProtoMessage &msg, uint8_t mess
   std::vector<uint8_t> &shared_buf = conn->parent_->get_shared_buffer_ref();
   if (conn->flags_.batch_first_message) {
-    // First message - buffer already prepared by caller, just clear the flag
+    // Single message or first batch message
+    conn->prepare_first_message_buffer(shared_buf, header_padding, total_calculated_size);
     conn->flags_.batch_first_message = false;
   } else {
     // Batch message second or later
@@ -1859,10 +1860,9 @@ void APIConnection::process_batch_() {
   // Fast path for single message - allocate exact size needed
   if (num_items == 1) {
     const auto &item = this->deferred_batch_[0];
-    this->prepare_first_message_buffer(shared_buf, item.estimated_size);
     // Let dispatch_message_ calculate size and encode if it fits
-    uint16_t payload_size = this->dispatch_message_(item, std::numeric_limits<uint16_t>::max());
+    uint16_t payload_size = this->dispatch_message_(item, std::numeric_limits<uint16_t>::max(), true);
     if (payload_size > 0 && this->send_buffer(ProtoWriteBuffer{&shared_buf}, item.message_type)) {
 #ifdef HAS_PROTO_MESSAGE_DUMP
@@ -1889,16 +1889,19 @@ void APIConnection::process_batch_() {
   const uint8_t header_padding = this->helper_->frame_header_padding();
   const uint8_t footer_size = this->helper_->frame_footer_size();
 
-  // Calculate total overhead for all messages
-  // Reserve based on estimated size (much more accurate than 24-byte worst-case)
+  // Initialize buffer and tracking variables
+  shared_buf.clear();
+
   // Pre-calculate exact buffer size needed based on message types
   uint32_t total_estimated_size = num_items * (header_padding + footer_size);
   for (size_t i = 0; i < this->deferred_batch_.size(); i++) {
     const auto &item = this->deferred_batch_[i];
     total_estimated_size += item.estimated_size;
   }
 
-  // Initialize buffer with total estimated size and set batch_first_message flag
-  this->prepare_first_message_buffer(shared_buf, header_padding, total_estimated_size);
+  // Calculate total overhead for all messages
+  // Reserve based on estimated size (much more accurate than 24-byte worst-case)
+  shared_buf.reserve(total_estimated_size);
+
   size_t items_processed = 0;
   uint16_t remaining_size = std::numeric_limits<uint16_t>::max();
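
To make the restored sizing concrete: the reservation is num_items * (header_padding + footer_size) plus the sum of per-item estimated sizes, done once before the batch loop. A tiny worked example with made-up numbers (the real padding and footer come from the active frame helper):

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical values; real ones come from frame_header_padding()/frame_footer_size().
  const uint8_t header_padding = 6, footer_size = 0;
  const uint8_t estimated[] = {24, 40, 16};  // per-message estimated_size
  const size_t num_items = 3;

  uint32_t total_estimated_size = num_items * (header_padding + footer_size);
  for (uint8_t e : estimated)
    total_estimated_size += e;

  // 3 * (6 + 0) + 80 = 98 bytes reserved up front instead of growing per message.
  std::printf("reserve %u bytes\n", static_cast<unsigned>(total_estimated_size));
}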
@@ -1914,7 +1917,7 @@ void APIConnection::process_batch_() {
     const auto &item = this->deferred_batch_[i];
     // Try to encode message via dispatch
     // The dispatch function calculates overhead to determine if the message fits
-    uint16_t payload_size = this->dispatch_message_(item, remaining_size);
+    uint16_t payload_size = this->dispatch_message_(item, remaining_size, i == 0);
     if (payload_size == 0) {
       // Message won't fit, stop processing
@@ -1982,7 +1985,9 @@ void APIConnection::process_batch_() {
 // Dispatch message encoding based on message_type
 // Switch assigns function pointer, single call site for smaller code size
-uint16_t APIConnection::dispatch_message_(const DeferredBatch::BatchItem &item, uint32_t remaining_size) {
+uint16_t APIConnection::dispatch_message_(const DeferredBatch::BatchItem &item, uint32_t remaining_size,
+                                          bool batch_first) {
+  this->flags_.batch_first_message = batch_first;
 #ifdef USE_EVENT
   // Events need aux_data_index to look up event type from entity
   if (item.message_type == EventResponse::MESSAGE_TYPE) {
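
The batch loop above regains the i == 0 argument, so only the first message triggers buffer preparation, while remaining_size caps how much can still be packed. A condensed sketch of that loop shape; the fit test and the accounting rule here are assumptions for illustration, not the real bookkeeping:

#include <cstddef>
#include <cstdint>
#include <limits>
#include <vector>

// Invented stand-ins for illustration; not the real BatchItem or dispatch.
struct Item {
  uint16_t estimated_size;
};

static uint16_t dispatch_message(const Item &item, uint32_t remaining, bool batch_first) {
  (void)batch_first;  // the real code would set flags_.batch_first_message here
  return item.estimated_size <= remaining ? item.estimated_size : 0;
}

int main() {
  std::vector<Item> batch{{24}, {40}, {16}};
  uint32_t remaining_size = std::numeric_limits<uint16_t>::max();
  for (size_t i = 0; i < batch.size(); i++) {
    // Only the first iteration requests a fresh buffer (batch_first = i == 0).
    uint16_t payload_size = dispatch_message(batch[i], remaining_size, i == 0);
    if (payload_size == 0)
      break;  // message will not fit; flush what was packed so far
    remaining_size -= payload_size;  // assumed accounting; frame overhead omitted
  }
}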

View File

@@ -275,15 +275,6 @@ class APIConnection final : public APIServerConnection {
     shared_buf.reserve(total_size);
     // Resize to add header padding so message encoding starts at the correct position
     shared_buf.resize(header_padding);
-    // Mark that the next encode should use fresh buffer positioning
-    this->flags_.batch_first_message = true;
   }
-
-  // Convenience overload - computes frame overhead internally
-  void prepare_first_message_buffer(std::vector<uint8_t> &shared_buf, size_t payload_size) {
-    const uint8_t header_padding = this->helper_->frame_header_padding();
-    const uint8_t footer_size = this->helper_->frame_footer_size();
-    this->prepare_first_message_buffer(shared_buf, header_padding, payload_size + header_padding + footer_size);
-  }
 
   bool try_to_clear_buffer(bool log_out_of_space);
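
The deleted convenience overload only performed size arithmetic before delegating to the three-argument version: total buffer size = payload + frame header padding + frame footer. A one-line equivalent, with hypothetical framing values in the assert:

#include <cstddef>

// What the removed overload computed before delegating to the 3-arg version:
// total buffer size = payload + frame header padding + frame footer.
constexpr std::size_t total_frame_size(std::size_t payload, std::size_t header_padding, std::size_t footer) {
  return payload + header_padding + footer;
}
static_assert(total_frame_size(64, 6, 0) == 70, "hypothetical plaintext framing values");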
@@ -628,12 +619,12 @@ class APIConnection final : public APIServerConnection {
   // Dispatch message encoding based on message_type - replaces function pointer storage
   // Switch assigns pointer, single call site for smaller code size
-  uint16_t dispatch_message_(const DeferredBatch::BatchItem &item, uint32_t remaining_size);
+  uint16_t dispatch_message_(const DeferredBatch::BatchItem &item, uint32_t remaining_size, bool batch_first);
 #ifdef HAS_PROTO_MESSAGE_DUMP
   void log_batch_item_(const DeferredBatch::BatchItem &item) {
     this->flags_.log_only_mode = true;
-    this->dispatch_message_(item, MAX_BATCH_PACKET_SIZE);
+    this->dispatch_message_(item, MAX_BATCH_PACKET_SIZE, true);
     this->flags_.log_only_mode = false;
   }
 #endif
@@ -662,11 +653,9 @@ class APIConnection final : public APIServerConnection {
   bool send_message_smart_(EntityBase *entity, uint8_t message_type, uint8_t estimated_size,
                            uint8_t aux_data_index = DeferredBatch::AUX_DATA_UNUSED) {
     if (this->should_send_immediately_(message_type) && this->helper_->can_write_without_blocking()) {
-      auto &shared_buf = this->parent_->get_shared_buffer_ref();
-      this->prepare_first_message_buffer(shared_buf, estimated_size);
       DeferredBatch::BatchItem item{entity, message_type, estimated_size, aux_data_index};
-      if (this->dispatch_message_(item, MAX_BATCH_PACKET_SIZE) &&
-          this->send_buffer(ProtoWriteBuffer{&shared_buf}, message_type)) {
+      if (this->dispatch_message_(item, MAX_BATCH_PACKET_SIZE, true) &&
+          this->send_buffer(ProtoWriteBuffer{&this->parent_->get_shared_buffer_ref()}, message_type)) {
 #ifdef HAS_PROTO_MESSAGE_DUMP
         this->log_batch_item_(item);
 #endif
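
Taken together, the restored fast path in send_message_smart_ reads: if the message type should be sent immediately and the socket can take a write, dispatch with batch_first = true and send the shared buffer; otherwise the message stays in the deferred batch. A sketch of that decision with invented predicate stand-ins (the real checks are should_send_immediately_() and helper_->can_write_without_blocking()):

#include <cstdint>

// Invented stand-ins for illustration only.
static bool should_send_immediately(uint8_t) { return true; }
static bool can_write_without_blocking() { return true; }
static bool dispatch_and_send(uint8_t) { return true; }
static void queue_for_batch(uint8_t) {}

static bool send_message_smart(uint8_t message_type) {
  if (should_send_immediately(message_type) && can_write_without_blocking()) {
    // Fast path: encode into the shared buffer (batch_first = true) and send now.
    return dispatch_and_send(message_type);
  }
  queue_for_batch(message_type);  // slow path: defer into the batch
  return true;
}

int main() { return send_message_smart(42) ? 0 : 1; }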