From 76083b2cd4f4263b36f4528ded9eb6ff2de4a0dc Mon Sep 17 00:00:00 2001 From: Will Miles Date: Tue, 4 Feb 2025 22:32:17 -0500 Subject: [PATCH 01/34] Replace queue with linked list Replace the bounded queue with a linked list and condvar implementation, and replace the closed_slots system with double indirection via AsyncClient's own memory. This allows the system to correctly handle cases where it is not possible to allocate a new event and still ensure that the client's `onDisconnect()` will be queued for execution. --- src/AsyncTCP.cpp | 1012 +++++++++++++++++++++------------------------- src/AsyncTCP.h | 49 +-- 2 files changed, 483 insertions(+), 578 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 932af57..1a58238 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -15,6 +15,9 @@ extern "C" { #if CONFIG_ASYNC_TCP_USE_WDT #include "esp_task_wdt.h" +#define ASYNC_TCP_MAX_TASK_SLEEP (pdMS_TO_TICKS(1000 * CONFIG_ESP_TASK_WDT_TIMEOUT_S) / 4) +#else +#define ASYNC_TCP_MAX_TASK_SLEEP portMAX_DELAY #endif // Required for: @@ -41,14 +44,18 @@ extern "C" { #define TCP_MUTEX_UNLOCK() #endif // CONFIG_LWIP_TCPIP_CORE_LOCKING -#define INVALID_CLOSED_SLOT -1 - /* TCP poll interval is specified in terms of the TCP coarse timer interval, which is called twice a second https://github.com/espressif/esp-lwip/blob/2acf959a2bb559313cd2bf9306c24612ba3d0e19/src/core/tcp.c#L1895 */ #define CONFIG_ASYNC_TCP_POLL_TIMER 1 +#ifdef ASYNC_TCP_DEBUG +#define DEBUG_PRINTF(...) log_d(__VA_ARGS__) +#else +#define DEBUG_PRINTF(...) 
+#endif + /* * TCP/IP Event Task * */ @@ -65,220 +72,287 @@ typedef enum { LWIP_TCP_DNS } lwip_tcp_event_t; -typedef struct { +struct lwip_tcp_event_packet_t { + lwip_tcp_event_packet_t *next; lwip_tcp_event_t event; - void *arg; + AsyncClient *client; +#ifdef ASYNCTCP_VALIDATE_PCB + tcp_pcb *pcb; +#endif union { struct { - tcp_pcb *pcb; int8_t err; } connected; struct { int8_t err; } error; struct { - tcp_pcb *pcb; uint16_t len; } sent; struct { - tcp_pcb *pcb; pbuf *pb; int8_t err; } recv; struct { - tcp_pcb *pcb; int8_t err; } fin; struct { - tcp_pcb *pcb; - } poll; - struct { - AsyncClient *client; + AsyncServer *server; } accept; struct { const char *name; ip_addr_t addr; } dns; }; -} lwip_tcp_event_packet_t; - -static QueueHandle_t _async_queue = NULL; -static TaskHandle_t _async_service_task_handle = NULL; - -static SemaphoreHandle_t _slots_lock = NULL; -static const int _number_of_closed_slots = CONFIG_LWIP_MAX_ACTIVE_TCP; -static uint32_t _closed_slots[_number_of_closed_slots]; -static uint32_t _closed_index = []() { - _slots_lock = xSemaphoreCreateBinary(); - configASSERT(_slots_lock); // Add sanity check - xSemaphoreGive(_slots_lock); - for (int i = 0; i < _number_of_closed_slots; ++i) { - _closed_slots[i] = 1; +}; + +// Forward declarations for TCP event callbacks +static int8_t _tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf *pb, int8_t err); +static int8_t _tcp_sent(void *arg, struct tcp_pcb *pcb, uint16_t len); +static void _tcp_error(void *arg, int8_t err); +static int8_t _tcp_poll(void *arg, struct tcp_pcb *pcb); + +// helper function +static lwip_tcp_event_packet_t *_alloc_event(lwip_tcp_event_t event, AsyncClient *client, tcp_pcb *pcb) { + // Validation check + if (pcb && (client->pcb() != pcb)) { + // Client structure is corrupt? 
+ log_e("Client mismatch allocating event for 0x%08x 0x%08x vs 0x%08x", (intptr_t)client, (intptr_t)pcb, client->pcb()); + tcp_abort(pcb); + _tcp_error(client, ERR_ARG); + return nullptr; } - return 1; -}(); -static inline bool _init_async_event_queue() { - if (!_async_queue) { - _async_queue = xQueueCreate(CONFIG_ASYNC_TCP_QUEUE_SIZE, sizeof(lwip_tcp_event_packet_t *)); - if (!_async_queue) { - return false; + lwip_tcp_event_packet_t *e = (lwip_tcp_event_packet_t *)malloc(sizeof(lwip_tcp_event_packet_t)); + + if (!e) { + // Allocation fail - abort client and give up + log_e("OOM allocating event for 0x%08x 0x%08x", (intptr_t)client, (intptr_t)pcb); + if (pcb) { + tcp_abort(pcb); } + _tcp_error(client, ERR_MEM); + return nullptr; } - return true; -} -static inline bool _send_async_event(lwip_tcp_event_packet_t **e, TickType_t wait = portMAX_DELAY) { - return _async_queue && xQueueSend(_async_queue, e, wait) == pdPASS; + e->next = nullptr; + e->event = event; + e->client = client; +#ifdef ASYNCTCP_VALIDATE_PCB + e->pcb = pcb; +#endif + DEBUG_PRINTF("_AE: 0x%08x -> %d 0x%08x 0x%08x", (intptr_t)e, (int)event, (intptr_t)client, (intptr_t)pcb); + return e; } -static inline bool _prepend_async_event(lwip_tcp_event_packet_t **e, TickType_t wait = portMAX_DELAY) { - return _async_queue && xQueueSendToFront(_async_queue, e, wait) == pdPASS; +static void _free_event(lwip_tcp_event_packet_t *evpkt) { + DEBUG_PRINTF("_FE: 0x%08x -> %d 0x%08x [0x%08x]", (intptr_t)evpkt, (int)evpkt->event, (intptr_t)evpkt->client, (intptr_t)evpkt->next); + if ((evpkt->event == LWIP_TCP_RECV) && (evpkt->recv.pb != nullptr)) { + // We must free the packet buffer + pbuf_free(evpkt->recv.pb); + } + free(evpkt); } -static inline bool _get_async_event(lwip_tcp_event_packet_t **e) { - while (true) { - if (!_async_queue) { - break; - } +// Global variables +static SemaphoreHandle_t _async_queue_mutex = nullptr; +static lwip_tcp_event_packet_t *_async_queue_head = nullptr, *_async_queue_tail = nullptr; 
+static TaskHandle_t _async_service_task_handle = NULL; -#if CONFIG_ASYNC_TCP_USE_WDT - // need to return periodically to feed the dog - if (xQueueReceive(_async_queue, e, pdMS_TO_TICKS(1000)) != pdPASS) { - break; - } -#else - if (xQueueReceive(_async_queue, e, portMAX_DELAY) != pdPASS) { - break; - } -#endif +namespace { +class queue_mutex_guard { + bool holds_mutex; - if ((*e)->event != LWIP_TCP_POLL) { - return true; +public: + inline queue_mutex_guard() : holds_mutex(xSemaphoreTake(_async_queue_mutex, portMAX_DELAY)) {}; + inline ~queue_mutex_guard() { + if (holds_mutex) { + xSemaphoreGive(_async_queue_mutex); } + }; + inline explicit operator bool() const { + return holds_mutex; + }; +}; +} // namespace - /* - Let's try to coalesce two (or more) consecutive poll events into one - this usually happens with poor implemented user-callbacks that are runs too long and makes poll events to stack in the queue - if consecutive user callback for a same connection runs longer that poll time then it will fill the queue with events until it deadlocks. - This is a workaround to mitigate such poor designs and won't let other events/connections to starve the task time. - It won't be effective if user would run multiple simultaneous long running callbacks due to message interleaving. 
- todo: implement some kind of fair dequeuing or (better) simply punish user for a bad designed callbacks by resetting hog connections - */ - lwip_tcp_event_packet_t *next_pkt = NULL; - while (xQueuePeek(_async_queue, &next_pkt, 0) == pdPASS) { - if (next_pkt->arg == (*e)->arg && next_pkt->event == LWIP_TCP_POLL) { - if (xQueueReceive(_async_queue, &next_pkt, 0) == pdPASS) { - free(next_pkt); - next_pkt = NULL; - log_d("coalescing polls, network congestion or async callbacks might be too slow!"); - continue; - } - } - - // quit while loop if next event can't be discarded - break; +static inline bool _init_async_event_queue() { + if (!_async_queue_mutex) { + _async_queue_mutex = xSemaphoreCreateMutex(); + if (!_async_queue_mutex) { + return false; } + } + return true; +} - /* - now we have to decide if to proceed with poll callback handler or discard it? - poor designed apps using asynctcp without proper dataflow control could flood the queue with interleaved pool/ack events. - I.e. on each poll app would try to generate more data to send, which in turn results in additional ack event triggering chain effect - for long connections. Or poll callback could take long time starving other connections. Anyway our goal is to keep the queue length - grows under control (if possible) and poll events are the safest to discard. 
- Let's discard poll events processing using linear-increasing probability curve when queue size grows over 3/4 - Poll events are periodic and connection could get another chance next time - */ - if (uxQueueMessagesWaiting(_async_queue) > (rand() % CONFIG_ASYNC_TCP_QUEUE_SIZE / 4 + CONFIG_ASYNC_TCP_QUEUE_SIZE * 3 / 4)) { - free(*e); - *e = NULL; - log_d("discarding poll due to queue congestion"); - continue; // Retry +static inline bool _send_async_event(lwip_tcp_event_packet_t *e) { + queue_mutex_guard guard; + if (guard) { + if (_async_queue_tail) { + _async_queue_tail->next = e; + } else { + _async_queue_head = e; } - return true; + _async_queue_tail = e; +#ifdef ASYNC_TCP_DEBUG + uint32_t n; + xTaskNotifyAndQuery(_async_service_task_handle, 1, eIncrement, &n); + DEBUG_PRINTF("SAA: 0x%08x -> 0x%08x 0x%08x - %d", (intptr_t)e, (intptr_t)_async_queue_head, (intptr_t)_async_queue_tail, n); +#else + xTaskNotifyGive(_async_service_task_handle); +#endif } - return false; + return (bool)guard; } -static bool _remove_events_with_arg(void *arg) { - if (!_async_queue) { - return false; +static inline bool _prepend_async_event(lwip_tcp_event_packet_t *e) { + queue_mutex_guard guard; + if (guard) { + if (_async_queue_head) { + e->next = _async_queue_head; + } else { + _async_queue_tail = e; + } + _async_queue_head = e; +#ifdef ASYNC_TCP_DEBUG + uint32_t n; + xTaskNotifyAndQuery(_async_service_task_handle, 1, eIncrement, &n); + DEBUG_PRINTF("PAA: 0x%08x -> 0x%08x 0x%08x - %d", (intptr_t)e, (intptr_t)_async_queue_head, (intptr_t)_async_queue_tail, n); +#else + xTaskNotifyGive(_async_service_task_handle); +#endif } + return (bool)guard; +} - lwip_tcp_event_packet_t *first_packet = NULL; - lwip_tcp_event_packet_t *packet = NULL; - - // figure out which is the first non-matching packet so we can keep the order - while (!first_packet) { - if (xQueueReceive(_async_queue, &first_packet, 0) != pdPASS) { - return false; +static inline lwip_tcp_event_packet_t *_get_async_event() { + 
queue_mutex_guard guard; + lwip_tcp_event_packet_t *e = nullptr; + if (guard) { + e = _async_queue_head; + if (_async_queue_head) { + _async_queue_head = _async_queue_head->next; } - // discard packet if matching - if ((uintptr_t)first_packet->arg == (uintptr_t)arg) { - free(first_packet); - first_packet = NULL; - } else if (xQueueSend(_async_queue, &first_packet, 0) != pdPASS) { - // try to return first packet to the back of the queue - // we can't wait here if queue is full, because this call has been done from the only consumer task of this queue - // otherwise it would deadlock, we have to discard the event - free(first_packet); - first_packet = NULL; - return false; + if (!_async_queue_head) { + _async_queue_tail = nullptr; } - } - - while (xQueuePeek(_async_queue, &packet, 0) == pdPASS && packet != first_packet) { - if (xQueueReceive(_async_queue, &packet, 0) != pdPASS) { - return false; + DEBUG_PRINTF("GAA: 0x%08x -> 0x%08x 0x%08x", (intptr_t)e, (intptr_t)_async_queue_head, (intptr_t)_async_queue_tail); + } + return e; +} + +static bool _remove_events_for(AsyncClient *client) { + queue_mutex_guard guard; + if (guard) { + auto count = 0U, total = 0U; + auto current = _async_queue_head; + auto prev = decltype(current){nullptr}; + while (current != nullptr) { + ++total; + if (current->client == client) { + ++count; + auto last_next = prev ? 
&prev->next : &_async_queue_head; + *last_next = current->next; + if (_async_queue_tail == current) { + _async_queue_tail = prev; + } + _free_event(current); + current = *last_next; + } else { + prev = current; + current = current->next; + } } - if ((uintptr_t)packet->arg == (uintptr_t)arg) { - // remove matching event - free(packet); - packet = NULL; - // otherwise try to requeue it - } else if (xQueueSend(_async_queue, &packet, 0) != pdPASS) { - // we can't wait here if queue is full, because this call has been done from the only consumer task of this queue - // otherwise it would deadlock, we have to discard the event - free(packet); - packet = NULL; - return false; + DEBUG_PRINTF("_REF: Removed %d/%d for 0x%08x", count, total, (intptr_t)client); + }; + return (bool)guard; +}; + +// Detail class for interacting with AsyncClient internals, but without exposing the API to other parts of the program +class AsyncClient_detail { +public: + static inline lwip_tcp_event_packet_t *invalidate_pcb(AsyncClient &client) { + client._pcb = nullptr; + return client._end_event; + }; + static void __attribute__((visibility("internal"))) handle_async_event(lwip_tcp_event_packet_t *event); +}; + +static lwip_tcp_event_packet_t *_register_pcb(tcp_pcb *pcb, AsyncClient *client) { + // do client-specific setup + auto end_event = _alloc_event(LWIP_TCP_ERROR, client, pcb); + if (end_event) { + tcp_arg(pcb, client); + tcp_recv(pcb, &_tcp_recv); + tcp_sent(pcb, &_tcp_sent); + tcp_err(pcb, &_tcp_error); + tcp_poll(pcb, &_tcp_poll, 1); + }; + return end_event; +} + +static void _teardown_pcb(tcp_pcb *pcb) { + assert(pcb); + // Do teardown + auto old_arg = pcb->callback_arg; + tcp_arg(pcb, NULL); + tcp_sent(pcb, NULL); + tcp_recv(pcb, NULL); + tcp_err(pcb, NULL); + tcp_poll(pcb, NULL, 0); + if (old_arg) { + _remove_events_for(reinterpret_cast(old_arg)); + } +} + +void AsyncClient_detail::handle_async_event(lwip_tcp_event_packet_t *e) { + // Special cases first + if (e->event == 
LWIP_TCP_ERROR) { + DEBUG_PRINTF("-E: 0x%08x %d", e->client, e->error.err); + // Special case: pcb is now invalid, and will have been null'd out by the lwip thread + if (e->client) { + e->client->_error(e->error.err); } + return; // do not free this event, it belongs to the client + } else if (e->event == LWIP_TCP_DNS) { // client has no PCB allocated yet + DEBUG_PRINTF("-D: 0x%08x %s = %s", e->client, e->dns.name, ipaddr_ntoa(&e->dns.addr)); + e->client->_dns_found(&e->dns.addr); } - return true; -} - -static void _handle_async_event(lwip_tcp_event_packet_t *e) { - if (e->arg == NULL) { - // do nothing when arg is NULL - // ets_printf("event arg == NULL: 0x%08x\n", e->recv.pcb); - } else if (e->event == LWIP_TCP_CLEAR) { - _remove_events_with_arg(e->arg); - } else if (e->event == LWIP_TCP_RECV) { - // ets_printf("-R: 0x%08x\n", e->recv.pcb); - AsyncClient::_s_recv(e->arg, e->recv.pcb, e->recv.pb, e->recv.err); + // Now check for client pointer + else if (e->client->pcb() == NULL) { + // This can only happen if event processing is racing with closing or destruction in a third task. + // Drop the event and do nothing. + DEBUG_PRINTF("event client pcb == NULL: 0x%08x", e->client); + } +#ifdef ASYNCTCP_VALIDATE_PCB + else if (e->client.pcb() != e->pcb) { + log_e("event client pcb mismatch: 0x%08x -> 0x%08x vs 0x%08x", e->client, e->client.pcb(), e->pcb); + } +#endif + // OK, process other events + // TODO: is a switch-case more code efficient? 
+ else if (e->event == LWIP_TCP_RECV) { + DEBUG_PRINTF("-R: 0x%08x", e->client->_pcb); + e->client->_recv(e->recv.pb, e->recv.err); + e->recv.pb = nullptr; // client has taken responsibility for freeing it } else if (e->event == LWIP_TCP_FIN) { - // ets_printf("-F: 0x%08x\n", e->fin.pcb); - AsyncClient::_s_fin(e->arg, e->fin.pcb, e->fin.err); + DEBUG_PRINTF("-F: 0x%08x", e->client->_pcb); + e->client->_fin(e->fin.err); } else if (e->event == LWIP_TCP_SENT) { - // ets_printf("-S: 0x%08x\n", e->sent.pcb); - AsyncClient::_s_sent(e->arg, e->sent.pcb, e->sent.len); + DEBUG_PRINTF("-S: 0x%08x", e->client->_pcb); + e->client->_sent(e->sent.len); } else if (e->event == LWIP_TCP_POLL) { - // ets_printf("-P: 0x%08x\n", e->poll.pcb); - AsyncClient::_s_poll(e->arg, e->poll.pcb); - } else if (e->event == LWIP_TCP_ERROR) { - // ets_printf("-E: 0x%08x %d\n", e->arg, e->error.err); - AsyncClient::_s_error(e->arg, e->error.err); + DEBUG_PRINTF("-P: 0x%08x", e->client->_pcb); + e->client->_poll(); } else if (e->event == LWIP_TCP_CONNECTED) { - // ets_printf("C: 0x%08x 0x%08x %d\n", e->arg, e->connected.pcb, e->connected.err); - AsyncClient::_s_connected(e->arg, e->connected.pcb, e->connected.err); + DEBUG_PRINTF("-C: 0x%08x 0x%08x %d", e->client, e->client->_pcb, e->connected.err); + e->client->_connected(e->connected.err); } else if (e->event == LWIP_TCP_ACCEPT) { - // ets_printf("A: 0x%08x 0x%08x\n", e->arg, e->accept.client); - AsyncServer::_s_accepted(e->arg, e->accept.client); - } else if (e->event == LWIP_TCP_DNS) { - // ets_printf("D: 0x%08x %s = %s\n", e->arg, e->dns.name, ipaddr_ntoa(&e->dns.addr)); - AsyncClient::_s_dns_found(e->dns.name, &e->dns.addr, e->arg); + DEBUG_PRINTF("-A: 0x%08x 0x%08x", e->client, e->accept.server); + e->accept.server->_accepted(e->client); } - free((void *)(e)); + _free_event(e); } static void _async_service_task(void *pvParameters) { @@ -287,11 +361,17 @@ static void _async_service_task(void *pvParameters) { log_w("Failed to add async task to 
WDT"); } #endif - lwip_tcp_event_packet_t *packet = NULL; for (;;) { - if (_get_async_event(&packet)) { - _handle_async_event(packet); + while (auto packet = _get_async_event()) { + AsyncClient_detail::handle_async_event(packet); +#if CONFIG_ASYNC_TCP_USE_WDT + esp_task_wdt_reset(); +#endif } + // queue is empty + // DEBUG_PRINTF("Async task waiting 0x%08",(intptr_t)_async_queue_head); + ulTaskNotifyTake(pdTRUE, ASYNC_TCP_MAX_TASK_SLEEP); + // DEBUG_PRINTF("Async task woke = %d 0x%08x",q, (intptr_t)_async_queue_head); #if CONFIG_ASYNC_TCP_USE_WDT esp_task_wdt_reset(); #endif @@ -302,6 +382,7 @@ static void _async_service_task(void *pvParameters) { vTaskDelete(NULL); _async_service_task_handle = NULL; } + /* static void _stop_async_task(){ if(_async_service_task_handle){ @@ -345,154 +426,94 @@ static bool _start_async_task() { * LwIP Callbacks * */ -static int8_t _tcp_clear_events(void *arg) { - lwip_tcp_event_packet_t *e = (lwip_tcp_event_packet_t *)malloc(sizeof(lwip_tcp_event_packet_t)); - if (!e) { - log_e("Failed to allocate event packet"); - return ERR_MEM; - } - e->event = LWIP_TCP_CLEAR; - e->arg = arg; - if (!_prepend_async_event(&e)) { - free((void *)(e)); - } - return ERR_OK; -} - static int8_t _tcp_connected(void *arg, tcp_pcb *pcb, int8_t err) { - // ets_printf("+C: 0x%08x\n", pcb); - lwip_tcp_event_packet_t *e = (lwip_tcp_event_packet_t *)malloc(sizeof(lwip_tcp_event_packet_t)); - if (!e) { - log_e("Failed to allocate event packet"); - return ERR_MEM; - } - e->event = LWIP_TCP_CONNECTED; - e->arg = arg; - e->connected.pcb = pcb; - e->connected.err = err; - if (!_prepend_async_event(&e)) { - free((void *)(e)); + DEBUG_PRINTF("+C: 0x%08x", pcb); + AsyncClient *client = reinterpret_cast(arg); + lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_CONNECTED, client, pcb); + if (e) { + e->connected.err = err; + _send_async_event(e); } return ERR_OK; } static int8_t _tcp_poll(void *arg, struct tcp_pcb *pcb) { - // throttle polling events queueing when event 
queue is getting filled up, let it handle _onack's - // log_d("qs:%u", uxQueueMessagesWaiting(_async_queue)); - if (uxQueueMessagesWaiting(_async_queue) > (rand() % CONFIG_ASYNC_TCP_QUEUE_SIZE / 2 + CONFIG_ASYNC_TCP_QUEUE_SIZE / 4)) { - log_d("throttling"); - return ERR_OK; - } - - // ets_printf("+P: 0x%08x\n", pcb); - lwip_tcp_event_packet_t *e = (lwip_tcp_event_packet_t *)malloc(sizeof(lwip_tcp_event_packet_t)); - if (!e) { - log_e("Failed to allocate event packet"); - return ERR_MEM; - } - e->event = LWIP_TCP_POLL; - e->arg = arg; - e->poll.pcb = pcb; - // poll events are not critical 'cause those are repetitive, so we may not wait the queue in any case - if (!_send_async_event(&e, 0)) { - free((void *)(e)); + DEBUG_PRINTF("+P: 0x%08x", pcb); + AsyncClient *client = reinterpret_cast(arg); + lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_POLL, client, pcb); + if (e != nullptr) { + _send_async_event(e); } return ERR_OK; } static int8_t _tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf *pb, int8_t err) { - lwip_tcp_event_packet_t *e = (lwip_tcp_event_packet_t *)malloc(sizeof(lwip_tcp_event_packet_t)); - if (!e) { - log_e("Failed to allocate event packet"); - return ERR_MEM; - } - e->arg = arg; - if (pb) { - // ets_printf("+R: 0x%08x\n", pcb); - e->event = LWIP_TCP_RECV; - e->recv.pcb = pcb; - e->recv.pb = pb; - e->recv.err = err; - } else { - // ets_printf("+F: 0x%08x\n", pcb); - e->event = LWIP_TCP_FIN; - e->fin.pcb = pcb; - e->fin.err = err; - // close the PCB in LwIP thread - AsyncClient::_s_lwip_fin(e->arg, e->fin.pcb, e->fin.err); - } - if (!_send_async_event(&e)) { - free((void *)(e)); + AsyncClient *client = reinterpret_cast(arg); + lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_RECV, client, pcb); + if (e != nullptr) { + if (pb) { + DEBUG_PRINTF("+R: 0x%08x", pcb); + e->recv.pb = pb; + e->recv.err = err; + } else { + DEBUG_PRINTF("+F: 0x%08x -> 0x%08x", pcb, arg); + e->event = LWIP_TCP_FIN; + e->fin.err = err; + } + _send_async_event(e); } 
return ERR_OK; } static int8_t _tcp_sent(void *arg, struct tcp_pcb *pcb, uint16_t len) { - // ets_printf("+S: 0x%08x\n", pcb); - lwip_tcp_event_packet_t *e = (lwip_tcp_event_packet_t *)malloc(sizeof(lwip_tcp_event_packet_t)); - if (!e) { - log_e("Failed to allocate event packet"); - return ERR_MEM; - } - e->event = LWIP_TCP_SENT; - e->arg = arg; - e->sent.pcb = pcb; - e->sent.len = len; - if (!_send_async_event(&e)) { - free((void *)(e)); + DEBUG_PRINTF("+S: 0x%08x", pcb); + AsyncClient *client = reinterpret_cast(arg); + lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_SENT, client, pcb); + if (e != nullptr) { + e->sent.len = len; + _send_async_event(e); } return ERR_OK; } static void _tcp_error(void *arg, int8_t err) { - // ets_printf("+E: 0x%08x\n", arg); - lwip_tcp_event_packet_t *e = (lwip_tcp_event_packet_t *)malloc(sizeof(lwip_tcp_event_packet_t)); - if (!e) { - log_e("Failed to allocate event packet"); - return; - } - e->event = LWIP_TCP_ERROR; - e->arg = arg; + DEBUG_PRINTF("+E: 0x%08x", arg); + AsyncClient *client = reinterpret_cast(arg); + assert(client); + // The associated pcb is now invalid and will soon be deallocated + // We call on the preallocated end event from the client object + lwip_tcp_event_packet_t *e = AsyncClient_detail::invalidate_pcb(*client); + assert(e); e->error.err = err; - if (!_send_async_event(&e)) { - free((void *)(e)); - } + _remove_events_for(client); // FUTURE: we could hold the lock the whole time + _prepend_async_event(e); } static void _tcp_dns_found(const char *name, struct ip_addr *ipaddr, void *arg) { - lwip_tcp_event_packet_t *e = (lwip_tcp_event_packet_t *)malloc(sizeof(lwip_tcp_event_packet_t)); - if (!e) { - log_e("Failed to allocate event packet"); - return; - } - // ets_printf("+DNS: name=%s ipaddr=0x%08x arg=%x\n", name, ipaddr, arg); - e->event = LWIP_TCP_DNS; - e->arg = arg; - e->dns.name = name; - if (ipaddr) { - memcpy(&e->dns.addr, ipaddr, sizeof(struct ip_addr)); - } else { - memset(&e->dns.addr, 0, 
sizeof(e->dns.addr)); - } - if (!_send_async_event(&e)) { - free((void *)(e)); + DEBUG_PRINTF("+DNS: name=%s ipaddr=0x%08x arg=%x", name, ipaddr, arg); + auto client = reinterpret_cast(arg); + lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_DNS, client, client->pcb()); + if (e != nullptr) { + e->dns.name = name; + if (ipaddr) { + memcpy(&e->dns.addr, ipaddr, sizeof(struct ip_addr)); + } else { + memset(&e->dns.addr, 0, sizeof(e->dns.addr)); + } + _send_async_event(e); } } -// Used to switch out from LwIP thread -static int8_t _tcp_accept(void *arg, AsyncClient *client) { - lwip_tcp_event_packet_t *e = (lwip_tcp_event_packet_t *)malloc(sizeof(lwip_tcp_event_packet_t)); - if (!e) { - log_e("Failed to allocate event packet"); +// Runs on LWIP thread +static int8_t _tcp_accept(AsyncServer *server, AsyncClient *client) { + lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_ACCEPT, client, client->pcb()); + if (e) { + e->accept.server = server; + _send_async_event(e); + return ERR_OK; + } else { return ERR_MEM; } - e->event = LWIP_TCP_ACCEPT; - e->arg = arg; - e->accept.client = client; - if (!_prepend_async_event(&e)) { - free((void *)(e)); - } - return ERR_OK; } /* @@ -503,8 +524,7 @@ static int8_t _tcp_accept(void *arg, AsyncClient *client) { typedef struct { struct tcpip_api_call_data call; - tcp_pcb *pcb; - int8_t closed_slot; + tcp_pcb **pcb_ptr; // double indirection to manage races with client threads int8_t err; union { struct { @@ -526,89 +546,64 @@ typedef struct { }; } tcp_api_call_t; +// Given the multithreaded nature of this code, it's possible that pcb has +// been invalidated by the stack thread, but the client thread doesn't know +// yet. Before performing any operation on a pcb, check to make sure we +// are still tracking it. +// AsyncClient guarantees that the _pcb member can only be overwritten by +// the LwIP thread. 
+static inline bool pcb_is_active(tcp_api_call_t &p) { + return (p.pcb_ptr) && (*p.pcb_ptr); +} + static err_t _tcp_output_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; msg->err = ERR_CONN; - if (msg->closed_slot == INVALID_CLOSED_SLOT || !_closed_slots[msg->closed_slot]) { - msg->err = tcp_output(msg->pcb); + if (pcb_is_active(*msg)) { + msg->err = tcp_output(*msg->pcb_ptr); } return msg->err; } -static esp_err_t _tcp_output(tcp_pcb *pcb, int8_t closed_slot) { - if (!pcb) { - return ERR_CONN; - } - tcp_api_call_t msg; - msg.pcb = pcb; - msg.closed_slot = closed_slot; - tcpip_api_call(_tcp_output_api, (struct tcpip_api_call_data *)&msg); - return msg.err; -} - static err_t _tcp_write_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; msg->err = ERR_CONN; - if (msg->closed_slot == INVALID_CLOSED_SLOT || !_closed_slots[msg->closed_slot]) { - msg->err = tcp_write(msg->pcb, msg->write.data, msg->write.size, msg->write.apiflags); + if (pcb_is_active(*msg)) { + msg->err = tcp_write(*msg->pcb_ptr, msg->write.data, msg->write.size, msg->write.apiflags); } return msg->err; } -static esp_err_t _tcp_write(tcp_pcb *pcb, int8_t closed_slot, const char *data, size_t size, uint8_t apiflags) { - if (!pcb) { - return ERR_CONN; - } - tcp_api_call_t msg; - msg.pcb = pcb; - msg.closed_slot = closed_slot; - msg.write.data = data; - msg.write.size = size; - msg.write.apiflags = apiflags; - tcpip_api_call(_tcp_write_api, (struct tcpip_api_call_data *)&msg); - return msg.err; -} - static err_t _tcp_recved_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; msg->err = ERR_CONN; - if (msg->closed_slot == INVALID_CLOSED_SLOT || !_closed_slots[msg->closed_slot]) { - // if(msg->closed_slot != INVALID_CLOSED_SLOT && !_closed_slots[msg->closed_slot]) { - // if(msg->closed_slot != INVALID_CLOSED_SLOT) { + if (pcb_is_active(*msg)) { msg->err = 
0; - tcp_recved(msg->pcb, msg->received); + tcp_recved(*msg->pcb_ptr, msg->received); } return msg->err; } -static esp_err_t _tcp_recved(tcp_pcb *pcb, int8_t closed_slot, size_t len) { - if (!pcb) { - return ERR_CONN; - } - tcp_api_call_t msg; - msg.pcb = pcb; - msg.closed_slot = closed_slot; - msg.received = len; - tcpip_api_call(_tcp_recved_api, (struct tcpip_api_call_data *)&msg); - return msg.err; -} - +// Sets *pcb_ptr to nullptr static err_t _tcp_close_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; msg->err = ERR_CONN; - if (msg->closed_slot == INVALID_CLOSED_SLOT || !_closed_slots[msg->closed_slot]) { - msg->err = tcp_close(msg->pcb); + if (pcb_is_active(*msg)) { + _teardown_pcb(*msg->pcb_ptr); + msg->err = tcp_close(*msg->pcb_ptr); + if (msg->err == ERR_OK) { + *msg->pcb_ptr = nullptr; + } } return msg->err; } -static esp_err_t _tcp_close(tcp_pcb *pcb, int8_t closed_slot) { - if (!pcb) { +static esp_err_t _tcp_close(tcp_pcb **pcb_ptr) { + if (!pcb_ptr || !*pcb_ptr) { return ERR_CONN; } tcp_api_call_t msg; - msg.pcb = pcb; - msg.closed_slot = closed_slot; + msg.pcb_ptr = pcb_ptr; tcpip_api_call(_tcp_close_api, (struct tcpip_api_call_data *)&msg); return msg.err; } @@ -616,46 +611,34 @@ static esp_err_t _tcp_close(tcp_pcb *pcb, int8_t closed_slot) { static err_t _tcp_abort_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; msg->err = ERR_CONN; - if (msg->closed_slot == INVALID_CLOSED_SLOT || !_closed_slots[msg->closed_slot]) { - tcp_abort(msg->pcb); + if (pcb_is_active(*msg)) { + _teardown_pcb(*msg->pcb_ptr); + tcp_abort(*msg->pcb_ptr); + *msg->pcb_ptr = nullptr; } return msg->err; } -static esp_err_t _tcp_abort(tcp_pcb *pcb, int8_t closed_slot) { - if (!pcb) { +static esp_err_t _tcp_abort(tcp_pcb **pcb_ptr) { + if (!pcb_ptr || !*pcb_ptr) { return ERR_CONN; } tcp_api_call_t msg; - msg.pcb = pcb; - msg.closed_slot = closed_slot; + msg.pcb_ptr = pcb_ptr; 
tcpip_api_call(_tcp_abort_api, (struct tcpip_api_call_data *)&msg); + assert(*pcb_ptr == nullptr); // must be true return msg.err; } static err_t _tcp_connect_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; - msg->err = tcp_connect(msg->pcb, msg->connect.addr, msg->connect.port, msg->connect.cb); + msg->err = tcp_connect(*msg->pcb_ptr, msg->connect.addr, msg->connect.port, msg->connect.cb); return msg->err; } -static esp_err_t _tcp_connect(tcp_pcb *pcb, int8_t closed_slot, ip_addr_t *addr, uint16_t port, tcp_connected_fn cb) { - if (!pcb) { - return ESP_FAIL; - } - tcp_api_call_t msg; - msg.pcb = pcb; - msg.closed_slot = closed_slot; - msg.connect.addr = addr; - msg.connect.port = port; - msg.connect.cb = cb; - tcpip_api_call(_tcp_connect_api, (struct tcpip_api_call_data *)&msg); - return msg.err; -} - static err_t _tcp_bind_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; - msg->err = tcp_bind(msg->pcb, msg->bind.addr, msg->bind.port); + msg->err = tcp_bind(*msg->pcb_ptr, msg->bind.addr, msg->bind.port); return msg->err; } @@ -664,8 +647,7 @@ static esp_err_t _tcp_bind(tcp_pcb *pcb, ip_addr_t *addr, uint16_t port) { return ESP_FAIL; } tcp_api_call_t msg; - msg.pcb = pcb; - msg.closed_slot = -1; + msg.pcb_ptr = &pcb; msg.bind.addr = addr; msg.bind.port = port; tcpip_api_call(_tcp_bind_api, (struct tcpip_api_call_data *)&msg); @@ -675,7 +657,7 @@ static esp_err_t _tcp_bind(tcp_pcb *pcb, ip_addr_t *addr, uint16_t port) { static err_t _tcp_listen_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; msg->err = 0; - msg->pcb = tcp_listen_with_backlog(msg->pcb, msg->backlog); + *msg->pcb_ptr = tcp_listen_with_backlog(*msg->pcb_ptr, msg->backlog); return msg->err; } @@ -684,11 +666,10 @@ static tcp_pcb *_tcp_listen_with_backlog(tcp_pcb *pcb, uint8_t backlog) { return NULL; } tcp_api_call_t msg; - msg.pcb = pcb; - 
msg.closed_slot = -1; + msg.pcb_ptr = &pcb; msg.backlog = backlog ? backlog : 0xFF; tcpip_api_call(_tcp_listen_api, (struct tcpip_api_call_data *)&msg); - return msg.pcb; + return pcb; } /* @@ -696,36 +677,44 @@ static tcp_pcb *_tcp_listen_with_backlog(tcp_pcb *pcb, uint8_t backlog) { */ AsyncClient::AsyncClient(tcp_pcb *pcb) - : _connect_cb(0), _connect_cb_arg(0), _discard_cb(0), _discard_cb_arg(0), _sent_cb(0), _sent_cb_arg(0), _error_cb(0), _error_cb_arg(0), _recv_cb(0), - _recv_cb_arg(0), _pb_cb(0), _pb_cb_arg(0), _timeout_cb(0), _timeout_cb_arg(0), _poll_cb(0), _poll_cb_arg(0), _ack_pcb(true), _tx_last_packet(0), - _rx_timeout(0), _rx_last_ack(0), _ack_timeout(CONFIG_ASYNC_TCP_MAX_ACK_TIME), _connect_port(0), prev(NULL), next(NULL) { - _pcb = pcb; - _closed_slot = INVALID_CLOSED_SLOT; + : _pcb(pcb), _end_event(nullptr), _connect_cb(0), _connect_cb_arg(0), _discard_cb(0), _discard_cb_arg(0), _sent_cb(0), _sent_cb_arg(0), _error_cb(0), + _error_cb_arg(0), _recv_cb(0), _recv_cb_arg(0), _pb_cb(0), _pb_cb_arg(0), _timeout_cb(0), _timeout_cb_arg(0), _ack_pcb(true), _tx_last_packet(0), + _rx_timeout(0), _rx_last_ack(0), _ack_timeout(CONFIG_ASYNC_TCP_MAX_ACK_TIME), _connect_port(0) +#ifdef CONFIG_ASYNC_TCP_CLIENT_LIST + , + prev(NULL), next(NULL) +#endif +{ if (_pcb) { + _end_event = _register_pcb(_pcb, this); _rx_last_packet = millis(); - tcp_arg(_pcb, this); - tcp_recv(_pcb, &_tcp_recv); - tcp_sent(_pcb, &_tcp_sent); - tcp_err(_pcb, &_tcp_error); - tcp_poll(_pcb, &_tcp_poll, CONFIG_ASYNC_TCP_POLL_TIMER); - if (!_allocate_closed_slot()) { - _close(); + if (!_end_event) { + // Out of memory!! 
+ log_e("Unable to allocate event"); + // Swallow this PCB, producing a null client object + tcp_abort(_pcb); + _pcb = nullptr; } } + DEBUG_PRINTF("+AC: 0x%08x -> 0x%08x", _pcb, (intptr_t)this); } AsyncClient::~AsyncClient() { if (_pcb) { _close(); } - _free_closed_slot(); + if (_end_event) { + _free_event(_end_event); + } + DEBUG_PRINTF("-AC: 0x%08x -> 0x%08x", _pcb, (intptr_t)this); } /* * Operators * */ -AsyncClient &AsyncClient::operator=(const AsyncClient &other) { +/* TODO +AsyncClient& AsyncClient::operator=(const AsyncClient& other) { if (_pcb) { _close(); } @@ -742,11 +731,13 @@ AsyncClient &AsyncClient::operator=(const AsyncClient &other) { } return *this; } +*/ bool AsyncClient::operator==(const AsyncClient &other) { return _pcb == other._pcb; } +#ifdef CONFIG_ASYNC_TCP_CLIENT_LIST AsyncClient &AsyncClient::operator+=(const AsyncClient &other) { if (next == NULL) { next = (AsyncClient *)(&other); @@ -761,6 +752,7 @@ AsyncClient &AsyncClient::operator+=(const AsyncClient &other) { } return *this; } +#endif /* * Callback Setters @@ -820,11 +812,6 @@ bool AsyncClient::_connect(ip_addr_t addr, uint16_t port) { return false; } - if (!_allocate_closed_slot()) { - log_e("failed to allocate: closed slot full"); - return false; - } - TCP_MUTEX_LOCK(); tcp_pcb *pcb = tcp_new_ip_type(addr.type); if (!pcb) { @@ -832,15 +819,23 @@ bool AsyncClient::_connect(ip_addr_t addr, uint16_t port) { log_e("pcb == NULL"); return false; } - tcp_arg(pcb, this); - tcp_err(pcb, &_tcp_error); - tcp_recv(pcb, &_tcp_recv); - tcp_sent(pcb, &_tcp_sent); - tcp_poll(pcb, &_tcp_poll, CONFIG_ASYNC_TCP_POLL_TIMER); + _end_event = _register_pcb(_pcb, this); TCP_MUTEX_UNLOCK(); - esp_err_t err = _tcp_connect(pcb, _closed_slot, &addr, port, (tcp_connected_fn)&_tcp_connected); - return err == ESP_OK; + if (!_end_event) { + log_e("Unable to allocate event"); + tcp_abort(_pcb); + _pcb = nullptr; + return false; + } + + tcp_api_call_t msg; + msg.pcb_ptr = &_pcb; + msg.connect.addr = &addr; + 
msg.connect.port = port; + msg.connect.cb = (tcp_connected_fn)&_tcp_connected; + tcpip_api_call(_tcp_connect_api, (struct tcpip_api_call_data *)&msg); + return msg.err == ESP_OK; } bool AsyncClient::connect(const IPAddress &ip, uint16_t port) { @@ -898,15 +893,14 @@ bool AsyncClient::connect(const char *host, uint16_t port) { void AsyncClient::close(bool now) { if (_pcb) { - _tcp_recved(_pcb, _closed_slot, _rx_ack_len); + _recved(_rx_ack_len); } _close(); } int8_t AsyncClient::abort() { if (_pcb) { - _tcp_abort(_pcb, _closed_slot); - _pcb = NULL; + _tcp_abort(&_pcb); } return ERR_ABRT; } @@ -926,19 +920,29 @@ size_t AsyncClient::add(const char *data, size_t size, uint8_t apiflags) { if (!room) { return 0; } - size_t will_send = (room < size) ? room : size; - int8_t err = ERR_OK; - err = _tcp_write(_pcb, _closed_slot, data, will_send, apiflags); - if (err != ERR_OK) { + tcp_api_call_t msg; + msg.pcb_ptr = &_pcb; + msg.err = ERR_OK; + msg.write.data = data; + msg.write.size = std::min(room, size); + msg.write.apiflags = apiflags; + tcpip_api_call(_tcp_write_api, (struct tcpip_api_call_data *)&msg); + if (msg.err != ERR_OK) { return 0; } - return will_send; + return msg.write.size; } bool AsyncClient::send() { auto backup = _tx_last_packet; _tx_last_packet = millis(); - if (_tcp_output(_pcb, _closed_slot) == ERR_OK) { + if (!_pcb) { + return false; + } + tcp_api_call_t msg; + msg.pcb_ptr = &_pcb; + tcpip_api_call(_tcp_output_api, (struct tcpip_api_call_data *)&msg); + if (msg.err == ERR_OK) { return true; } _tx_last_packet = backup; @@ -950,7 +954,7 @@ size_t AsyncClient::ack(size_t len) { len = _rx_ack_len; } if (len) { - _tcp_recved(_pcb, _closed_slot, len); + _recved(len); } _rx_ack_len -= len; return len; @@ -960,7 +964,7 @@ void AsyncClient::ackPacket(struct pbuf *pb) { if (!pb) { return; } - _tcp_recved(_pcb, _closed_slot, pb->len); + _recved(pb->len); pbuf_free(pb); } @@ -969,23 +973,14 @@ void AsyncClient::ackPacket(struct pbuf *pb) { * */ int8_t 
AsyncClient::_close() { - // ets_printf("X: 0x%08x\n", (uint32_t)this); + DEBUG_PRINTF("close: 0x%08x", (uint32_t)this); int8_t err = ERR_OK; if (_pcb) { - TCP_MUTEX_LOCK(); - tcp_arg(_pcb, NULL); - tcp_sent(_pcb, NULL); - tcp_recv(_pcb, NULL); - tcp_err(_pcb, NULL); - tcp_poll(_pcb, NULL, 0); - TCP_MUTEX_UNLOCK(); - _tcp_clear_events(this); - err = _tcp_close(_pcb, _closed_slot); + // log_i(""); + err = _tcp_close(&_pcb); if (err != ERR_OK) { err = abort(); } - _free_closed_slot(); - _pcb = NULL; if (_discard_cb) { _discard_cb(_discard_cb_arg, this); } @@ -993,47 +988,12 @@ int8_t AsyncClient::_close() { return err; } -bool AsyncClient::_allocate_closed_slot() { - bool allocated = false; - if (xSemaphoreTake(_slots_lock, portMAX_DELAY) == pdTRUE) { - uint32_t closed_slot_min_index = 0; - allocated = _closed_slot != INVALID_CLOSED_SLOT; - if (!allocated) { - for (int i = 0; i < _number_of_closed_slots; ++i) { - if ((_closed_slot == INVALID_CLOSED_SLOT || _closed_slots[i] <= closed_slot_min_index) && _closed_slots[i] != 0) { - closed_slot_min_index = _closed_slots[i]; - _closed_slot = i; - } - } - allocated = _closed_slot != INVALID_CLOSED_SLOT; - if (allocated) { - _closed_slots[_closed_slot] = 0; - } - } - xSemaphoreGive(_slots_lock); - } - return allocated; -} - -void AsyncClient::_free_closed_slot() { - xSemaphoreTake(_slots_lock, portMAX_DELAY); - if (_closed_slot != INVALID_CLOSED_SLOT) { - _closed_slots[_closed_slot] = _closed_index; - _closed_slot = INVALID_CLOSED_SLOT; - ++_closed_index; - } - xSemaphoreGive(_slots_lock); -} - /* * Private Callbacks * */ -int8_t AsyncClient::_connected(tcp_pcb *pcb, int8_t err) { - _pcb = reinterpret_cast(pcb); - if (_pcb) { - _rx_last_packet = millis(); - } +int8_t AsyncClient::_connected(int8_t err) { + _rx_last_packet = millis(); if (_connect_cb) { _connect_cb(_connect_cb_arg, this); } @@ -1041,19 +1001,6 @@ int8_t AsyncClient::_connected(tcp_pcb *pcb, int8_t err) { } void AsyncClient::_error(int8_t err) { - if (_pcb) { 
- TCP_MUTEX_LOCK(); - tcp_arg(_pcb, NULL); - if (_pcb->state == LISTEN) { - tcp_sent(_pcb, NULL); - tcp_recv(_pcb, NULL); - tcp_err(_pcb, NULL); - tcp_poll(_pcb, NULL, 0); - } - TCP_MUTEX_UNLOCK(); - _free_closed_slot(); - _pcb = NULL; - } if (_error_cb) { _error_cb(_error_cb_arg, this, err); } @@ -1062,37 +1009,16 @@ void AsyncClient::_error(int8_t err) { } } -// In LwIP Thread -int8_t AsyncClient::_lwip_fin(tcp_pcb *pcb, int8_t err) { - if (!_pcb || pcb != _pcb) { - log_d("0x%08x != 0x%08x", (uint32_t)pcb, (uint32_t)_pcb); - return ERR_OK; - } - tcp_arg(_pcb, NULL); - if (_pcb->state == LISTEN) { - tcp_sent(_pcb, NULL); - tcp_recv(_pcb, NULL); - tcp_err(_pcb, NULL); - tcp_poll(_pcb, NULL, 0); - } - if (tcp_close(_pcb) != ERR_OK) { - tcp_abort(_pcb); - } - _free_closed_slot(); - _pcb = NULL; - return ERR_OK; -} - // In Async Thread -int8_t AsyncClient::_fin(tcp_pcb *pcb, int8_t err) { - _tcp_clear_events(this); - if (_discard_cb) { - _discard_cb(_discard_cb_arg, this); - } +int8_t AsyncClient::_fin(int8_t err) { + // WM: This isn't strictly correct -- we should instead pass this to a callback + // _fin() merely indicates that the remote end is closing, it doesn't require us + // to close until we're done sending. 
+ _close(); return ERR_OK; } -int8_t AsyncClient::_sent(tcp_pcb *pcb, uint16_t len) { +int8_t AsyncClient::_sent(uint16_t len) { _rx_last_ack = _rx_last_packet = millis(); if (_sent_cb) { _sent_cb(_sent_cb_arg, this, len, (_rx_last_packet - _tx_last_packet)); @@ -1100,7 +1026,7 @@ int8_t AsyncClient::_sent(tcp_pcb *pcb, uint16_t len) { return ERR_OK; } -int8_t AsyncClient::_recv(tcp_pcb *pcb, pbuf *pb, int8_t err) { +int8_t AsyncClient::_recv(pbuf *pb, int8_t err) { while (pb != NULL) { _rx_last_packet = millis(); // we should not ack before we assimilate the data @@ -1117,7 +1043,7 @@ int8_t AsyncClient::_recv(tcp_pcb *pcb, pbuf *pb, int8_t err) { if (!_ack_pcb) { _rx_ack_len += b->len; } else if (_pcb) { - _tcp_recved(_pcb, _closed_slot, b->len); + _recved(b->len); } } pbuf_free(b); @@ -1125,16 +1051,7 @@ int8_t AsyncClient::_recv(tcp_pcb *pcb, pbuf *pb, int8_t err) { return ERR_OK; } -int8_t AsyncClient::_poll(tcp_pcb *pcb) { - if (!_pcb) { - // log_d("pcb is NULL"); - return ERR_OK; - } - if (pcb != _pcb) { - log_d("0x%08x != 0x%08x", (uint32_t)pcb, (uint32_t)_pcb); - return ERR_OK; - } - +int8_t AsyncClient::_poll() { uint32_t now = millis(); // ACK Timeout @@ -1142,7 +1059,7 @@ int8_t AsyncClient::_poll(tcp_pcb *pcb) { const uint32_t one_day = 86400000; bool last_tx_is_after_last_ack = (_rx_last_ack - _tx_last_packet + one_day) < one_day; if (last_tx_is_after_last_ack && (now - _tx_last_packet) >= _ack_timeout) { - log_d("ack timeout %d", pcb->state); + log_d("ack timeout %d", _pcb->state); if (_timeout_cb) { _timeout_cb(_timeout_cb_arg, this, (now - _tx_last_packet)); } @@ -1151,7 +1068,7 @@ int8_t AsyncClient::_poll(tcp_pcb *pcb) { } // RX Timeout if (_rx_timeout && (now - _rx_last_packet) >= (_rx_timeout * 1000)) { - log_d("rx timeout %d", pcb->state); + log_d("rx timeout %d", _pcb->state); _close(); return ERR_OK; } @@ -1186,6 +1103,17 @@ void AsyncClient::_dns_found(struct ip_addr *ipaddr) { } } +int8_t AsyncClient::_recved(size_t len) { + if (!_pcb) { + 
return ERR_CONN; + } + tcp_api_call_t msg; + msg.pcb_ptr = &_pcb; + msg.received = len; + tcpip_api_call(_tcp_recved_api, (struct tcpip_api_call_data *)&msg); + return msg.err; +} + /* * Public Helper Methods * */ @@ -1243,6 +1171,9 @@ bool AsyncClient::getNoDelay() { } void AsyncClient::setKeepAlive(uint32_t ms, uint8_t cnt) { + if (!_pcb) { + return; + } if (ms != 0) { _pcb->so_options |= SOF_KEEPALIVE; // Turn on TCP Keepalive for the given pcb // Set the time between keepalive messages in milli-seconds @@ -1464,42 +1395,6 @@ const char *AsyncClient::stateToString() { } } -/* - * Static Callbacks (LwIP C2C++ interconnect) - * */ - -void AsyncClient::_s_dns_found(const char *name, struct ip_addr *ipaddr, void *arg) { - reinterpret_cast(arg)->_dns_found(ipaddr); -} - -int8_t AsyncClient::_s_poll(void *arg, struct tcp_pcb *pcb) { - return reinterpret_cast(arg)->_poll(pcb); -} - -int8_t AsyncClient::_s_recv(void *arg, struct tcp_pcb *pcb, struct pbuf *pb, int8_t err) { - return reinterpret_cast(arg)->_recv(pcb, pb, err); -} - -int8_t AsyncClient::_s_fin(void *arg, struct tcp_pcb *pcb, int8_t err) { - return reinterpret_cast(arg)->_fin(pcb, err); -} - -int8_t AsyncClient::_s_lwip_fin(void *arg, struct tcp_pcb *pcb, int8_t err) { - return reinterpret_cast(arg)->_lwip_fin(pcb, err); -} - -int8_t AsyncClient::_s_sent(void *arg, struct tcp_pcb *pcb, uint16_t len) { - return reinterpret_cast(arg)->_sent(pcb, len); -} - -void AsyncClient::_s_error(void *arg, int8_t err) { - reinterpret_cast(arg)->_error(err); -} - -int8_t AsyncClient::_s_connected(void *arg, struct tcp_pcb *pcb, int8_t err) { - return reinterpret_cast(arg)->_connected(pcb, err); -} - /* Async TCP Server */ @@ -1574,8 +1469,7 @@ void AsyncServer::begin() { err = _tcp_bind(_pcb, &local_addr, _port); if (err != ERR_OK) { - _tcp_close(_pcb, -1); - _pcb = NULL; + _tcp_close(&_pcb); log_e("bind error: %d", err); return; } @@ -1599,7 +1493,7 @@ void AsyncServer::end() { tcp_accept(_pcb, NULL); if (tcp_close(_pcb) 
!= ERR_OK) { TCP_MUTEX_UNLOCK(); - _tcp_abort(_pcb, -1); + _tcp_abort(&_pcb); } else { TCP_MUTEX_UNLOCK(); } @@ -1609,18 +1503,36 @@ void AsyncServer::end() { // runs on LwIP thread int8_t AsyncServer::_accept(tcp_pcb *pcb, int8_t err) { - // ets_printf("+A: 0x%08x\n", pcb); - if (_connect_cb) { - AsyncClient *c = new AsyncClient(pcb); - if (c) { - c->setNoDelay(_noDelay); - return _tcp_accept(this, c); + DEBUG_PRINTF("+A: 0x%08x %d", pcb, err); + if (pcb) { + if (_connect_cb) { + AsyncClient *c = new (std::nothrow) AsyncClient(pcb); + if (c && c->pcb()) { + c->setNoDelay(_noDelay); + if (_tcp_accept(this, c) == ERR_OK) { + return ERR_OK; // success + } + } + if (c->pcb()) { + // Couldn't allocate accept event + // We can't let the client object call in to close, as we're on the LWIP thread; it could deadlock trying to RPC to itself + AsyncClient_detail::invalidate_pcb(*c); + tcp_abort(pcb); + } + if (c) { + // Couldn't complete setup + // pcb has already been aborted + delete c; + pcb = nullptr; + } + } + if (pcb) { + if (tcp_close(pcb) != ERR_OK) { + tcp_abort(pcb); + } } } - if (tcp_close(pcb) != ERR_OK) { - tcp_abort(pcb); - } - log_d("FAIL"); + log_e("TCP ACCEPT FAIL"); return ERR_OK; } diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index 389692f..d28905a 100644 --- a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -47,14 +47,14 @@ extern "C" { #define CONFIG_ASYNC_TCP_PRIORITY 10 #endif -#ifndef CONFIG_ASYNC_TCP_QUEUE_SIZE -#define CONFIG_ASYNC_TCP_QUEUE_SIZE 64 -#endif - #ifndef CONFIG_ASYNC_TCP_MAX_ACK_TIME #define CONFIG_ASYNC_TCP_MAX_ACK_TIME 5000 #endif +#ifndef CONFIG_ASYNCTCP_HAS_INTRUSIVE_LIST +#define CONFIG_ASYNCTCP_HAS_INTRUSIVE_LIST 1 +#endif + class AsyncClient; #define ASYNC_WRITE_FLAG_COPY 0x01 // will allocate new buffer to hold the data while sending (else will hold reference to the data given) @@ -69,6 +69,8 @@ typedef std::function AcTimeoutHandl struct tcp_pcb; struct ip_addr; +class AsyncClient_detail; +struct lwip_tcp_event_packet_t; class 
AsyncClient { public: @@ -76,13 +78,12 @@ class AsyncClient { ~AsyncClient(); AsyncClient &operator=(const AsyncClient &other); - AsyncClient &operator+=(const AsyncClient &other); bool operator==(const AsyncClient &other); - bool operator!=(const AsyncClient &other) { return !(*this == other); } + bool connect(const IPAddress &ip, uint16_t port); #if ESP_IDF_VERSION_MAJOR < 5 bool connect(const IPv6Address &ip, uint16_t port); @@ -144,7 +145,7 @@ class AsyncClient { size_t write(const char *data, size_t size, uint8_t apiflags = ASYNC_WRITE_FLAG_COPY); /** - * @brief add and enqueue data for sending + * @brief add and enque data for sending * @note treats data as null-terminated string * * @param data @@ -211,7 +212,6 @@ class AsyncClient { // set callback - data received (called if onPacket is not used) void onData(AcDataHandler cb, void *arg = 0); // set callback - data received - // !!! You MUST call ackPacket() or free the pbuf yourself to prevent memory leaks void onPacket(AcPacketHandler cb, void *arg = 0); // set callback - ack timeout void onTimeout(AcTimeoutHandler cb, void *arg = 0); @@ -230,26 +230,13 @@ class AsyncClient { static const char *errorToString(int8_t error); const char *stateToString(); - // internal callbacks - Do NOT call any of the functions below in user code! 
- static int8_t _s_poll(void *arg, struct tcp_pcb *tpcb); - static int8_t _s_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *pb, int8_t err); - static int8_t _s_fin(void *arg, struct tcp_pcb *tpcb, int8_t err); - static int8_t _s_lwip_fin(void *arg, struct tcp_pcb *tpcb, int8_t err); - static void _s_error(void *arg, int8_t err); - static int8_t _s_sent(void *arg, struct tcp_pcb *tpcb, uint16_t len); - static int8_t _s_connected(void *arg, struct tcp_pcb *tpcb, int8_t err); - static void _s_dns_found(const char *name, struct ip_addr *ipaddr, void *arg); - - int8_t _recv(tcp_pcb *pcb, pbuf *pb, int8_t err); tcp_pcb *pcb() { return _pcb; } protected: - bool _connect(ip_addr_t addr, uint16_t port); - tcp_pcb *_pcb; - int8_t _closed_slot; + lwip_tcp_event_packet_t *_end_event; AcConnectHandler _connect_cb; void *_connect_cb_arg; @@ -277,20 +264,25 @@ class AsyncClient { uint32_t _ack_timeout; uint16_t _connect_port; + friend class AsyncClient_detail; + bool _connect(ip_addr_t addr, uint16_t port); int8_t _close(); - void _free_closed_slot(); - bool _allocate_closed_slot(); - int8_t _connected(tcp_pcb *pcb, int8_t err); + int8_t _connected(int8_t err); void _error(int8_t err); - int8_t _poll(tcp_pcb *pcb); - int8_t _sent(tcp_pcb *pcb, uint16_t len); - int8_t _fin(tcp_pcb *pcb, int8_t err); + int8_t _poll(); + int8_t _sent(uint16_t len); + int8_t _fin(int8_t err); int8_t _lwip_fin(tcp_pcb *pcb, int8_t err); + int8_t _recv(pbuf *pb, int8_t err); void _dns_found(struct ip_addr *ipaddr); + int8_t _recved(size_t len); +#ifdef CONFIG_ASYNC_TCP_CLIENT_LIST public: AsyncClient *prev; AsyncClient *next; + AsyncClient &operator+=(const AsyncClient &other); +#endif }; class AsyncServer { @@ -325,6 +317,7 @@ class AsyncServer { AcConnectHandler _connect_cb; void *_connect_cb_arg; + friend class AsyncClient_detail; int8_t _accept(tcp_pcb *newpcb, int8_t err); int8_t _accepted(AsyncClient *client); }; From 148ee430bc26949a94d752fd4a6f88ccc6b442de Mon Sep 17 00:00:00 2001 From: 
Will Miles Date: Wed, 5 Feb 2025 19:30:59 +0000 Subject: [PATCH 02/34] Fix pre-commit suggestions --- src/AsyncTCP.cpp | 2 +- src/AsyncTCP.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 1a58238..3b3ac2a 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -164,7 +164,7 @@ class queue_mutex_guard { bool holds_mutex; public: - inline queue_mutex_guard() : holds_mutex(xSemaphoreTake(_async_queue_mutex, portMAX_DELAY)) {}; + inline queue_mutex_guard() : holds_mutex(xSemaphoreTake(_async_queue_mutex, portMAX_DELAY)){}; inline ~queue_mutex_guard() { if (holds_mutex) { xSemaphoreGive(_async_queue_mutex); diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index d28905a..92f7be6 100644 --- a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -145,7 +145,7 @@ class AsyncClient { size_t write(const char *data, size_t size, uint8_t apiflags = ASYNC_WRITE_FLAG_COPY); /** - * @brief add and enque data for sending + * @brief add and enqueue data for sending * @note treats data as null-terminated string * * @param data From 673fbc972706d200368a266178e76d024917b6a8 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Wed, 5 Feb 2025 19:31:58 +0000 Subject: [PATCH 03/34] Remove AsyncClient intrusive list --- src/AsyncTCP.cpp | 24 +----------------------- src/AsyncTCP.h | 11 ----------- 2 files changed, 1 insertion(+), 34 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 3b3ac2a..7b3f092 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -679,12 +679,7 @@ static tcp_pcb *_tcp_listen_with_backlog(tcp_pcb *pcb, uint8_t backlog) { AsyncClient::AsyncClient(tcp_pcb *pcb) : _pcb(pcb), _end_event(nullptr), _connect_cb(0), _connect_cb_arg(0), _discard_cb(0), _discard_cb_arg(0), _sent_cb(0), _sent_cb_arg(0), _error_cb(0), _error_cb_arg(0), _recv_cb(0), _recv_cb_arg(0), _pb_cb(0), _pb_cb_arg(0), _timeout_cb(0), _timeout_cb_arg(0), _ack_pcb(true), _tx_last_packet(0), - _rx_timeout(0), _rx_last_ack(0), 
_ack_timeout(CONFIG_ASYNC_TCP_MAX_ACK_TIME), _connect_port(0) -#ifdef CONFIG_ASYNC_TCP_CLIENT_LIST - , - prev(NULL), next(NULL) -#endif -{ + _rx_timeout(0), _rx_last_ack(0), _ack_timeout(CONFIG_ASYNC_TCP_MAX_ACK_TIME), _connect_port(0) { if (_pcb) { _end_event = _register_pcb(_pcb, this); _rx_last_packet = millis(); @@ -737,23 +732,6 @@ bool AsyncClient::operator==(const AsyncClient &other) { return _pcb == other._pcb; } -#ifdef CONFIG_ASYNC_TCP_CLIENT_LIST -AsyncClient &AsyncClient::operator+=(const AsyncClient &other) { - if (next == NULL) { - next = (AsyncClient *)(&other); - next->prev = this; - } else { - AsyncClient *c = next; - while (c->next != NULL) { - c = c->next; - } - c->next = (AsyncClient *)(&other); - c->next->prev = c; - } - return *this; -} -#endif - /* * Callback Setters * */ diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index 92f7be6..d6655b3 100644 --- a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -51,10 +51,6 @@ extern "C" { #define CONFIG_ASYNC_TCP_MAX_ACK_TIME 5000 #endif -#ifndef CONFIG_ASYNCTCP_HAS_INTRUSIVE_LIST -#define CONFIG_ASYNCTCP_HAS_INTRUSIVE_LIST 1 -#endif - class AsyncClient; #define ASYNC_WRITE_FLAG_COPY 0x01 // will allocate new buffer to hold the data while sending (else will hold reference to the data given) @@ -276,13 +272,6 @@ class AsyncClient { int8_t _recv(pbuf *pb, int8_t err); void _dns_found(struct ip_addr *ipaddr); int8_t _recved(size_t len); - -#ifdef CONFIG_ASYNC_TCP_CLIENT_LIST -public: - AsyncClient *prev; - AsyncClient *next; - AsyncClient &operator+=(const AsyncClient &other); -#endif }; class AsyncServer { From 29d7ee50f2e15d9f4536aaa98018a1f2a8fd76db Mon Sep 17 00:00:00 2001 From: Will Miles Date: Wed, 5 Feb 2025 19:33:45 +0000 Subject: [PATCH 04/34] Remove AsyncClient::operator=(const AsyncClient&) The semantics of this operation were error-prone; remove it to ensure safety. 
--- src/AsyncTCP.cpp | 20 -------------------- src/AsyncTCP.h | 2 -- 2 files changed, 22 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 7b3f092..cd9ae5d 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -708,26 +708,6 @@ AsyncClient::~AsyncClient() { * Operators * */ -/* TODO -AsyncClient& AsyncClient::operator=(const AsyncClient& other) { - if (_pcb) { - _close(); - } - - _pcb = other._pcb; - _closed_slot = other._closed_slot; - if (_pcb) { - _rx_last_packet = millis(); - tcp_arg(_pcb, this); - tcp_recv(_pcb, &_tcp_recv); - tcp_sent(_pcb, &_tcp_sent); - tcp_err(_pcb, &_tcp_error); - tcp_poll(_pcb, &_tcp_poll, CONFIG_ASYNC_TCP_POLL_TIMER); - } - return *this; -} -*/ - bool AsyncClient::operator==(const AsyncClient &other) { return _pcb == other._pcb; } diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index d6655b3..f50508e 100644 --- a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -73,8 +73,6 @@ class AsyncClient { AsyncClient(tcp_pcb *pcb = 0); ~AsyncClient(); - AsyncClient &operator=(const AsyncClient &other); - bool operator==(const AsyncClient &other); bool operator!=(const AsyncClient &other) { return !(*this == other); From 7e0725acf490ff2d2cc834b0ebffd8931370171e Mon Sep 17 00:00:00 2001 From: Will Miles Date: Wed, 5 Feb 2025 21:54:59 +0000 Subject: [PATCH 05/34] Explicitly indicate non-copyability --- src/AsyncTCP.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index f50508e..1b05e13 100644 --- a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -73,6 +73,14 @@ class AsyncClient { AsyncClient(tcp_pcb *pcb = 0); ~AsyncClient(); + // Noncopyable + AsyncClient(const AsyncClient &) = delete; + AsyncClient &operator=(const AsyncClient &) = delete; + + // Nonmovable + AsyncClient(AsyncClient &&) = delete; + AsyncClient &operator=(AsyncClient &&) = delete; + bool operator==(const AsyncClient &other); bool operator!=(const AsyncClient &other) { return !(*this == other); @@ -280,6 +288,15 @@ class 
AsyncServer { #endif AsyncServer(uint16_t port); ~AsyncServer(); + + // Noncopyable + AsyncServer(const AsyncServer &) = delete; + AsyncServer &operator=(const AsyncServer &) = delete; + + // Nonmovable + AsyncServer(AsyncServer &&) = delete; + AsyncServer &operator=(AsyncServer &&) = delete; + void onClient(AcConnectHandler cb, void *arg); void begin(); void end(); From 5fdb22261d5540a433813018367707f09f300ee1 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Wed, 5 Feb 2025 19:34:55 +0000 Subject: [PATCH 06/34] Inline AsyncClient::operator== No reason for this to require a function call. --- src/AsyncTCP.cpp | 8 -------- src/AsyncTCP.h | 6 ++++-- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index cd9ae5d..84797d7 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -704,14 +704,6 @@ AsyncClient::~AsyncClient() { DEBUG_PRINTF("-AC: 0x%08x -> 0x%08x", _pcb, (intptr_t)this); } -/* - * Operators - * */ - -bool AsyncClient::operator==(const AsyncClient &other) { - return _pcb == other._pcb; -} - /* * Callback Setters * */ diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index 1b05e13..5afe949 100644 --- a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -81,8 +81,10 @@ class AsyncClient { AsyncClient(AsyncClient &&) = delete; AsyncClient &operator=(AsyncClient &&) = delete; - bool operator==(const AsyncClient &other); - bool operator!=(const AsyncClient &other) { + inline bool operator==(const AsyncClient &other) { + return _pcb == other._pcb; + } + inline bool operator!=(const AsyncClient &other) { return !(*this == other); } From f9191a90b38e3c67a61f4dc8a57c74ad96ee1ab4 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Wed, 5 Feb 2025 19:43:36 +0000 Subject: [PATCH 07/34] Say goodbye to malloc Use new(std::nothrow) instead. 
--- src/AsyncTCP.cpp | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 84797d7..b34430d 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -123,7 +123,7 @@ static lwip_tcp_event_packet_t *_alloc_event(lwip_tcp_event_t event, AsyncClient return nullptr; } - lwip_tcp_event_packet_t *e = (lwip_tcp_event_packet_t *)malloc(sizeof(lwip_tcp_event_packet_t)); + auto *e = new (std::nothrow) lwip_tcp_event_packet_t{nullptr, event, client}; if (!e) { // Allocation fail - abort client and give up @@ -135,9 +135,6 @@ static lwip_tcp_event_packet_t *_alloc_event(lwip_tcp_event_t event, AsyncClient return nullptr; } - e->next = nullptr; - e->event = event; - e->client = client; #ifdef ASYNCTCP_VALIDATE_PCB e->pcb = pcb; #endif @@ -151,7 +148,7 @@ static void _free_event(lwip_tcp_event_packet_t *evpkt) { // We must free the packet buffer pbuf_free(evpkt->recv.pb); } - free(evpkt); + delete evpkt; } // Global variables From 3caa027a194bae559e7bc798950a2b945dd87dc9 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Wed, 5 Feb 2025 19:53:11 +0000 Subject: [PATCH 08/34] Consistently return ERR_MEM If any of the TCP callbacks fails to allocate, return ERR_MEM. 
--- src/AsyncTCP.cpp | 53 ++++++++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index b34430d..53b53ae 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -427,10 +427,11 @@ static int8_t _tcp_connected(void *arg, tcp_pcb *pcb, int8_t err) { DEBUG_PRINTF("+C: 0x%08x", pcb); AsyncClient *client = reinterpret_cast(arg); lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_CONNECTED, client, pcb); - if (e) { - e->connected.err = err; - _send_async_event(e); + if (e == nullptr) { + return ERR_MEM; } + e->connected.err = err; + _send_async_event(e); return ERR_OK; } @@ -438,27 +439,31 @@ static int8_t _tcp_poll(void *arg, struct tcp_pcb *pcb) { DEBUG_PRINTF("+P: 0x%08x", pcb); AsyncClient *client = reinterpret_cast(arg); lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_POLL, client, pcb); - if (e != nullptr) { - _send_async_event(e); + if (e == nullptr) { + return ERR_MEM; } + _send_async_event(e); return ERR_OK; } static int8_t _tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf *pb, int8_t err) { AsyncClient *client = reinterpret_cast(arg); lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_RECV, client, pcb); - if (e != nullptr) { - if (pb) { - DEBUG_PRINTF("+R: 0x%08x", pcb); - e->recv.pb = pb; - e->recv.err = err; - } else { - DEBUG_PRINTF("+F: 0x%08x -> 0x%08x", pcb, arg); - e->event = LWIP_TCP_FIN; - e->fin.err = err; - } - _send_async_event(e); + if (e == nullptr) { + return ERR_MEM; } + + if (pb) { + DEBUG_PRINTF("+R: 0x%08x", pcb); + e->recv.pb = pb; + e->recv.err = err; + } else { + DEBUG_PRINTF("+F: 0x%08x -> 0x%08x", pcb, arg); + e->event = LWIP_TCP_FIN; + e->fin.err = err; + } + _send_async_event(e); + return ERR_OK; } @@ -466,10 +471,11 @@ static int8_t _tcp_sent(void *arg, struct tcp_pcb *pcb, uint16_t len) { DEBUG_PRINTF("+S: 0x%08x", pcb); AsyncClient *client = reinterpret_cast(arg); lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_SENT, client, pcb); 
- if (e != nullptr) { - e->sent.len = len; - _send_async_event(e); + if (e == nullptr) { + return ERR_MEM; } + e->sent.len = len; + _send_async_event(e); return ERR_OK; } @@ -504,13 +510,12 @@ static void _tcp_dns_found(const char *name, struct ip_addr *ipaddr, void *arg) // Runs on LWIP thread static int8_t _tcp_accept(AsyncServer *server, AsyncClient *client) { lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_ACCEPT, client, client->pcb()); - if (e) { - e->accept.server = server; - _send_async_event(e); - return ERR_OK; - } else { + if (e == nullptr) { return ERR_MEM; } + e->accept.server = server; + _send_async_event(e); + return ERR_OK; } /* From 2321755bacbd5760899b39c1e613853f541406b9 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Thu, 6 Feb 2025 17:58:12 -0500 Subject: [PATCH 09/34] Fix tcp_poll timer Should be using the config setting. --- src/AsyncTCP.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 53b53ae..d16d4ec 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -284,7 +284,7 @@ static lwip_tcp_event_packet_t *_register_pcb(tcp_pcb *pcb, AsyncClient *client) tcp_recv(pcb, &_tcp_recv); tcp_sent(pcb, &_tcp_sent); tcp_err(pcb, &_tcp_error); - tcp_poll(pcb, &_tcp_poll, 1); + tcp_poll(pcb, &_tcp_poll, CONFIG_ASYNC_TCP_POLL_TIMER); }; return end_event; } From 26e49f6bd2d5e27295ce607be6dd2c42c90e1a73 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Thu, 6 Feb 2025 19:05:59 -0500 Subject: [PATCH 10/34] Fix merge error in _connect We can store the newly allocated handle directly in the object member. 
--- src/AsyncTCP.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index d16d4ec..e8015b8 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -765,14 +765,13 @@ bool AsyncClient::_connect(ip_addr_t addr, uint16_t port) { } TCP_MUTEX_LOCK(); - tcp_pcb *pcb = tcp_new_ip_type(addr.type); - if (!pcb) { + _pcb = tcp_new_ip_type(addr.type); + if (!_pcb) { TCP_MUTEX_UNLOCK(); log_e("pcb == NULL"); return false; } _end_event = _register_pcb(_pcb, this); - TCP_MUTEX_UNLOCK(); if (!_end_event) { log_e("Unable to allocate event"); @@ -780,6 +779,7 @@ bool AsyncClient::_connect(ip_addr_t addr, uint16_t port) { _pcb = nullptr; return false; } + TCP_MUTEX_UNLOCK(); tcp_api_call_t msg; msg.pcb_ptr = &_pcb; From f3a5256c52170df5380d24eb2f40d9bf36b63e3c Mon Sep 17 00:00:00 2001 From: Will Miles Date: Thu, 6 Feb 2025 19:09:37 -0500 Subject: [PATCH 11/34] AsyncClient::_connect: Pass addr by const reference We don't plan on writing to it - might as well save the copying. 
--- src/AsyncTCP.cpp | 7 +++++-- src/AsyncTCP.h | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index e8015b8..45d6d0d 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -536,7 +536,7 @@ typedef struct { } write; size_t received; struct { - ip_addr_t *addr; + const ip_addr_t *addr; uint16_t port; tcp_connected_fn cb; } connect; @@ -634,6 +634,9 @@ static esp_err_t _tcp_abort(tcp_pcb **pcb_ptr) { static err_t _tcp_connect_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; + Serial.printf("Attempting connection with PCB %08X, ", (intptr_t)*msg->pcb_ptr); + Serial.print(IPAddress(msg->connect.addr)); + Serial.printf(", port %d\n", msg->connect.port); msg->err = tcp_connect(*msg->pcb_ptr, msg->connect.addr, msg->connect.port, msg->connect.cb); return msg->err; } @@ -754,7 +757,7 @@ void AsyncClient::onPoll(AcConnectHandler cb, void *arg) { * Main Public Methods * */ -bool AsyncClient::_connect(ip_addr_t addr, uint16_t port) { +bool AsyncClient::_connect(const ip_addr_t &addr, uint16_t port) { if (_pcb) { log_d("already connected, state %d", _pcb->state); return false; diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index 5afe949..e0b9aca 100644 --- a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -269,7 +269,7 @@ class AsyncClient { uint16_t _connect_port; friend class AsyncClient_detail; - bool _connect(ip_addr_t addr, uint16_t port); + bool _connect(const ip_addr_t &addr, uint16_t port); int8_t _close(); int8_t _connected(int8_t err); void _error(int8_t err); From 50e0086595f439a0778d9ae8ee53d8c202d27dfd Mon Sep 17 00:00:00 2001 From: Will Miles Date: Thu, 6 Feb 2025 19:19:01 -0500 Subject: [PATCH 12/34] Revert incorrect debug prints --- src/AsyncTCP.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 45d6d0d..336c071 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -634,9 +634,6 @@ static esp_err_t 
_tcp_abort(tcp_pcb **pcb_ptr) { static err_t _tcp_connect_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; - Serial.printf("Attempting connection with PCB %08X, ", (intptr_t)*msg->pcb_ptr); - Serial.print(IPAddress(msg->connect.addr)); - Serial.printf(", port %d\n", msg->connect.port); msg->err = tcp_connect(*msg->pcb_ptr, msg->connect.addr, msg->connect.port, msg->connect.cb); return msg->err; } From f6e1e05f679361377d8eda02b7587a74e2c065e5 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Sat, 8 Feb 2025 09:43:59 -0500 Subject: [PATCH 13/34] Remove invalid check on _accept pcb is already aborted by the constructor. --- src/AsyncTCP.cpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 336c071..bc59c67 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -1465,12 +1465,6 @@ int8_t AsyncServer::_accept(tcp_pcb *pcb, int8_t err) { return ERR_OK; // success } } - if (c->pcb()) { - // Couldn't allocate accept event - // We can't let the client object call in to close, as we're on the LWIP thread; it could deadlock trying to RPC to itself - AsyncClient_detail::invalidate_pcb(*c); - tcp_abort(pcb); - } if (c) { // Couldn't complete setup // pcb has already been aborted From 863a182e551b5c6a4c56b8816041a665d25f18b0 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Sat, 8 Feb 2025 09:45:20 -0500 Subject: [PATCH 14/34] Fix locking in AsyncServer::end() --- src/AsyncTCP.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index bc59c67..4112d34 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -1444,12 +1444,10 @@ void AsyncServer::end() { tcp_arg(_pcb, NULL); tcp_accept(_pcb, NULL); if (tcp_close(_pcb) != ERR_OK) { - TCP_MUTEX_UNLOCK(); _tcp_abort(&_pcb); - } else { - TCP_MUTEX_UNLOCK(); } _pcb = NULL; + TCP_MUTEX_UNLOCK(); } } From 5a1989f735df4de21c6f2092077cda5a26e869e7 Mon Sep 17 00:00:00 2001 From: Will Miles 
Date: Thu, 13 Feb 2025 21:36:04 -0500
Subject: [PATCH 15/34] Add @notes to connect and abort

---
 src/AsyncTCP.h | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h
index e0b9aca..d4157f4 100644
--- a/src/AsyncTCP.h
+++ b/src/AsyncTCP.h
@@ -95,6 +95,7 @@ class AsyncClient {
   bool connect(const char *host, uint16_t port);
   /**
    * @brief close connection
+   * @note will call onDisconnect callback
    *
    * @param now - ignored
    */
@@ -103,6 +104,13 @@ class AsyncClient {
   void stop() {
     close(false);
   };
+
+  /**
+   * @brief abort connection
+   * @note does NOT call onDisconnect callback!
+   *
+   * @return always returns ERR_ABRT
+   */
   int8_t abort();
 
   bool free();

From 9d3dec170f10a7373a01d4703db2d220f99fba99 Mon Sep 17 00:00:00 2001
From: Will Miles
Date: Thu, 13 Feb 2025 21:38:23 -0500
Subject: [PATCH 16/34] Use double indirection for _async_queue_tail

This simplifies the queue operations.

---
 src/AsyncTCP.cpp | 56 ++++++++++++++++++++++++------------------------
 1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp
index 4112d34..47660da 100644
--- a/src/AsyncTCP.cpp
+++ b/src/AsyncTCP.cpp
@@ -153,7 +153,8 @@ static void _free_event(lwip_tcp_event_packet_t *evpkt) {
 
 // Global variables
 static SemaphoreHandle_t _async_queue_mutex = nullptr;
-static lwip_tcp_event_packet_t *_async_queue_head = nullptr, *_async_queue_tail = nullptr;
+static lwip_tcp_event_packet_t *_async_queue_head = nullptr;
+static lwip_tcp_event_packet_t **_async_queue_tail = &_async_queue_head; // double indirection!
static TaskHandle_t _async_service_task_handle = NULL; namespace { @@ -184,14 +185,12 @@ static inline bool _init_async_event_queue() { } static inline bool _send_async_event(lwip_tcp_event_packet_t *e) { + assert(e != nullptr); + assert(_async_queue_tail != nullptr); queue_mutex_guard guard; if (guard) { - if (_async_queue_tail) { - _async_queue_tail->next = e; - } else { - _async_queue_head = e; - } - _async_queue_tail = e; + *_async_queue_tail = e; + _async_queue_tail = &e->next; #ifdef ASYNC_TCP_DEBUG uint32_t n; xTaskNotifyAndQuery(_async_service_task_handle, 1, eIncrement, &n); @@ -204,14 +203,16 @@ static inline bool _send_async_event(lwip_tcp_event_packet_t *e) { } static inline bool _prepend_async_event(lwip_tcp_event_packet_t *e) { + assert(e != nullptr); + assert(_async_queue_tail != nullptr); queue_mutex_guard guard; if (guard) { - if (_async_queue_head) { - e->next = _async_queue_head; - } else { - _async_queue_tail = e; + e->next = _async_queue_head; + if (e->next == nullptr) { + _async_queue_tail = &e->next; } _async_queue_head = e; + #ifdef ASYNC_TCP_DEBUG uint32_t n; xTaskNotifyAndQuery(_async_service_task_handle, 1, eIncrement, &n); @@ -227,12 +228,12 @@ static inline lwip_tcp_event_packet_t *_get_async_event() { queue_mutex_guard guard; lwip_tcp_event_packet_t *e = nullptr; if (guard) { - e = _async_queue_head; if (_async_queue_head) { - _async_queue_head = _async_queue_head->next; - } - if (!_async_queue_head) { - _async_queue_tail = nullptr; + e = _async_queue_head; + if (_async_queue_tail == &(e->next)) { + _async_queue_tail = &_async_queue_head; + } + _async_queue_head = e->next; } DEBUG_PRINTF("GAA: 0x%08x -> 0x%08x 0x%08x", (intptr_t)e, (intptr_t)_async_queue_head, (intptr_t)_async_queue_tail); } @@ -243,22 +244,21 @@ static bool _remove_events_for(AsyncClient *client) { queue_mutex_guard guard; if (guard) { auto count = 0U, total = 0U; - auto current = _async_queue_head; - auto prev = decltype(current){nullptr}; - while (current != 
nullptr) { + auto event_ptr = &_async_queue_head; + while (*event_ptr != nullptr) { ++total; - if (current->client == client) { + auto *event = *event_ptr; + if (event->client == client) { ++count; - auto last_next = prev ? &prev->next : &_async_queue_head; - *last_next = current->next; - if (_async_queue_tail == current) { - _async_queue_tail = prev; + *event_ptr = event->next; + if (event->next == nullptr) { + _async_queue_tail = event_ptr; } - _free_event(current); - current = *last_next; + _free_event(event); + // do not advance event_ptr } else { - prev = current; - current = current->next; + // advance event_ptr + event_ptr = &(*event_ptr)->next; } } DEBUG_PRINTF("_REF: Removed %d/%d for 0x%08x", count, total, (intptr_t)client); From 2df8999ff1a54cef69702da6bc5f3590e5388bab Mon Sep 17 00:00:00 2001 From: Will Miles Date: Sat, 15 Feb 2025 21:41:59 -0500 Subject: [PATCH 17/34] Break queue out to class For clarity of implementation --- src/AsyncTCP.cpp | 57 ++++++------------- src/simple_intrusive_list.h | 106 ++++++++++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+), 39 deletions(-) create mode 100644 src/simple_intrusive_list.h diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 47660da..9700f0b 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -4,6 +4,7 @@ #include "Arduino.h" #include "AsyncTCP.h" +#include "simple_intrusive_list.h" extern "C" { #include "lwip/dns.h" @@ -153,8 +154,7 @@ static void _free_event(lwip_tcp_event_packet_t *evpkt) { // Global variables static SemaphoreHandle_t _async_queue_mutex = nullptr; -static lwip_tcp_event_packet_t *_async_queue_head = nullptr; -static lwip_tcp_event_packet_t **_async_queue_tail = &_async_queue_head; // double indirection! 
+static simple_intrusive_list _async_queue; static TaskHandle_t _async_service_task_handle = NULL; namespace { @@ -186,15 +186,13 @@ static inline bool _init_async_event_queue() { static inline bool _send_async_event(lwip_tcp_event_packet_t *e) { assert(e != nullptr); - assert(_async_queue_tail != nullptr); queue_mutex_guard guard; if (guard) { - *_async_queue_tail = e; - _async_queue_tail = &e->next; + _async_queue.push_back(e); #ifdef ASYNC_TCP_DEBUG uint32_t n; xTaskNotifyAndQuery(_async_service_task_handle, 1, eIncrement, &n); - DEBUG_PRINTF("SAA: 0x%08x -> 0x%08x 0x%08x - %d", (intptr_t)e, (intptr_t)_async_queue_head, (intptr_t)_async_queue_tail, n); + DEBUG_PRINTF("0x%08x", (intptr_t)e); #else xTaskNotifyGive(_async_service_task_handle); #endif @@ -204,19 +202,14 @@ static inline bool _send_async_event(lwip_tcp_event_packet_t *e) { static inline bool _prepend_async_event(lwip_tcp_event_packet_t *e) { assert(e != nullptr); - assert(_async_queue_tail != nullptr); queue_mutex_guard guard; if (guard) { - e->next = _async_queue_head; - if (e->next == nullptr) { - _async_queue_tail = &e->next; - } - _async_queue_head = e; + _async_queue.push_front(e); #ifdef ASYNC_TCP_DEBUG uint32_t n; xTaskNotifyAndQuery(_async_service_task_handle, 1, eIncrement, &n); - DEBUG_PRINTF("PAA: 0x%08x -> 0x%08x 0x%08x - %d", (intptr_t)e, (intptr_t)_async_queue_head, (intptr_t)_async_queue_tail, n); + DEBUG_PRINTF("0x%08x", (intptr_t)e); #else xTaskNotifyGive(_async_service_task_handle); #endif @@ -228,40 +221,26 @@ static inline lwip_tcp_event_packet_t *_get_async_event() { queue_mutex_guard guard; lwip_tcp_event_packet_t *e = nullptr; if (guard) { - if (_async_queue_head) { - e = _async_queue_head; - if (_async_queue_tail == &(e->next)) { - _async_queue_tail = &_async_queue_head; - } - _async_queue_head = e->next; - } - DEBUG_PRINTF("GAA: 0x%08x -> 0x%08x 0x%08x", (intptr_t)e, (intptr_t)_async_queue_head, (intptr_t)_async_queue_tail); + e = _async_queue.pop_front(); } + 
DEBUG_PRINTF("0x%08x", (intptr_t)e); return e; } static bool _remove_events_for(AsyncClient *client) { queue_mutex_guard guard; if (guard) { - auto count = 0U, total = 0U; - auto event_ptr = &_async_queue_head; - while (*event_ptr != nullptr) { - ++total; - auto *event = *event_ptr; - if (event->client == client) { - ++count; - *event_ptr = event->next; - if (event->next == nullptr) { - _async_queue_tail = event_ptr; - } - _free_event(event); - // do not advance event_ptr - } else { - // advance event_ptr - event_ptr = &(*event_ptr)->next; - } + auto removed_events = _async_queue.remove_if([=](lwip_tcp_event_packet_t &pkt) { + return pkt.client == client; + }); + size_t count = 0; + while (removed_events) { + ++count; + auto t = removed_events; + removed_events = removed_events->next; + _free_event(t); } - DEBUG_PRINTF("_REF: Removed %d/%d for 0x%08x", count, total, (intptr_t)client); + DEBUG_PRINTF("Removed %d for 0x%08x", count, (intptr_t)client); }; return (bool)guard; }; diff --git a/src/simple_intrusive_list.h b/src/simple_intrusive_list.h new file mode 100644 index 0000000..3b8b84a --- /dev/null +++ b/src/simple_intrusive_list.h @@ -0,0 +1,106 @@ +// Simple intrusive list class + +template class simple_intrusive_list { + static_assert(std::is_same().next), T *>::value, "Template type must have public 'T* next' member"); + +private: + T *_head; + T **_tail; + +public: + // Static utility methods + static size_t list_size(T *chain) { + size_t count = 0; + for (auto c = chain; c != nullptr; c = c->next) { + ++count; + } + return count; + } + + static void delete_list(T *chain) { + while (chain) { + auto t = chain; + chain = chain->next; + delete t; + } + } + +public: + // Object methods + + simple_intrusive_list() : _head(nullptr), _tail(&_head) {} + ~simple_intrusive_list() { + clear(); + } + + // Noncopyable, nonmovable + simple_intrusive_list(const simple_intrusive_list &) = delete; + simple_intrusive_list(simple_intrusive_list &&) = delete; + 
simple_intrusive_list &operator=(const simple_intrusive_list &) = delete; + simple_intrusive_list &operator=(simple_intrusive_list &&) = delete; + + void push_back(T *obj) { + if (obj) { + *_tail = obj; + _tail = &obj->next; + } + } + + void push_front(T *obj) { + if (obj) { + if (_head == nullptr) { + _tail = &obj->next; + } + obj->next = _head; + _head = obj; + } + } + + T *pop_front() { + auto rv = _head; + if (_head) { + if (_tail == &_head->next) { + _tail = &_head; + } + _head = _head->next; + } + return rv; + } + + void clear() { + // Assumes all elements were allocated with "new" + delete_list(_head); + _head = nullptr; + _tail = &_head; + } + + size_t size() const { + return list_size(_head); + } + + T *remove_if(const std::function &condition) { + T *removed = nullptr; + auto **current_ptr = &_head; + while (*current_ptr != nullptr) { + auto *current = *current_ptr; + if (condition(*current)) { + *current_ptr = current->next; + if (current->next == nullptr) { + _tail = current_ptr; + } + current->next = removed; + removed = current; + // do not advance current_ptr + } else { + // advance current_ptr + current_ptr = &(*current_ptr)->next; + } + } + + return removed; + } + + T *begin() const { + return _head; + } +}; // class simple_intrusive_list From 96aee4be14317eef81d04713c9b6d64eddcd89cf Mon Sep 17 00:00:00 2001 From: Will Miles Date: Thu, 13 Feb 2025 21:43:14 -0500 Subject: [PATCH 18/34] Gift end event to queue Eliminates any possibility that a client may have its end event queued twice. LwIP should prevent this anyways, but it never hurts to be safe. 
--- src/AsyncTCP.cpp | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 9700f0b..e80dd3d 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -249,8 +249,10 @@ static bool _remove_events_for(AsyncClient *client) { class AsyncClient_detail { public: static inline lwip_tcp_event_packet_t *invalidate_pcb(AsyncClient &client) { + auto end_event = client._end_event; client._pcb = nullptr; - return client._end_event; + client._end_event = nullptr; + return end_event; }; static void __attribute__((visibility("internal"))) handle_async_event(lwip_tcp_event_packet_t *event); }; @@ -290,7 +292,6 @@ void AsyncClient_detail::handle_async_event(lwip_tcp_event_packet_t *e) { if (e->client) { e->client->_error(e->error.err); } - return; // do not free this event, it belongs to the client } else if (e->event == LWIP_TCP_DNS) { // client has no PCB allocated yet DEBUG_PRINTF("-D: 0x%08x %s = %s", e->client, e->dns.name, ipaddr_ntoa(&e->dns.addr)); e->client->_dns_found(&e->dns.addr); @@ -465,10 +466,13 @@ static void _tcp_error(void *arg, int8_t err) { // The associated pcb is now invalid and will soon be deallocated // We call on the preallocated end event from the client object lwip_tcp_event_packet_t *e = AsyncClient_detail::invalidate_pcb(*client); - assert(e); - e->error.err = err; - _remove_events_for(client); // FUTURE: we could hold the lock the whole time - _prepend_async_event(e); + if (e) { + e->error.err = err; + _remove_events_for(client); // FUTURE: we could hold the lock the whole time + _prepend_async_event(e); + } else { + log_e("tcp_error call for client %X with no end event", (intptr_t)client); + } } static void _tcp_dns_found(const char *name, struct ip_addr *ipaddr, void *arg) { From b862fbc9e7b02e8c4c1bfddbdf4f9ee4d1cee6c5 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Fri, 21 Feb 2025 21:01:48 -0500 Subject: [PATCH 19/34] simple_intrusive_list: Use typedefs --- 
src/simple_intrusive_list.h | 42 +++++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/src/simple_intrusive_list.h b/src/simple_intrusive_list.h index 3b8b84a..40e5d86 100644 --- a/src/simple_intrusive_list.h +++ b/src/simple_intrusive_list.h @@ -3,13 +3,13 @@ template class simple_intrusive_list { static_assert(std::is_same().next), T *>::value, "Template type must have public 'T* next' member"); -private: - T *_head; - T **_tail; - public: + typedef T value_type; + typedef value_type *value_ptr_type; + typedef value_ptr_type *value_ptr_ptr_type; + // Static utility methods - static size_t list_size(T *chain) { + static size_t list_size(value_ptr_type chain) { size_t count = 0; for (auto c = chain; c != nullptr; c = c->next) { ++count; @@ -17,7 +17,7 @@ template class simple_intrusive_list { return count; } - static void delete_list(T *chain) { + static void delete_list(value_ptr_type chain) { while (chain) { auto t = chain; chain = chain->next; @@ -39,14 +39,14 @@ template class simple_intrusive_list { simple_intrusive_list &operator=(const simple_intrusive_list &) = delete; simple_intrusive_list &operator=(simple_intrusive_list &&) = delete; - void push_back(T *obj) { + inline void push_back(value_ptr_type obj) { if (obj) { *_tail = obj; _tail = &obj->next; } } - void push_front(T *obj) { + inline void push_front(value_ptr_type obj) { if (obj) { if (_head == nullptr) { _tail = &obj->next; @@ -56,7 +56,7 @@ template class simple_intrusive_list { } } - T *pop_front() { + inline value_ptr_type pop_front() { auto rv = _head; if (_head) { if (_tail == &_head->next) { @@ -67,27 +67,30 @@ template class simple_intrusive_list { return rv; } - void clear() { + inline void clear() { // Assumes all elements were allocated with "new" delete_list(_head); _head = nullptr; _tail = &_head; } - size_t size() const { + inline size_t size() const { return list_size(_head); } - T *remove_if(const std::function &condition) { - T 
*removed = nullptr; - auto **current_ptr = &_head; + template inline value_ptr_type remove_if(const function_type &condition) { + value_ptr_type removed = nullptr; + value_ptr_ptr_type current_ptr = &_head; while (*current_ptr != nullptr) { - auto *current = *current_ptr; + value_ptr_type current = *current_ptr; if (condition(*current)) { + // Remove this item from the list by moving the next item in *current_ptr = current->next; + // If we were the last item, reset tail if (current->next == nullptr) { _tail = current_ptr; } + // Prepend this item to the removed list current->next = removed; removed = current; // do not advance current_ptr @@ -97,10 +100,17 @@ template class simple_intrusive_list { } } + // Return the removed entries return removed; } - T *begin() const { + inline value_ptr_type begin() const { return _head; } + +private: + // Data members + value_ptr_type _head; + value_ptr_ptr_type _tail; + }; // class simple_intrusive_list From 28ab534089161601d933e36885b797f443d9d6f7 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Fri, 21 Feb 2025 21:04:29 -0500 Subject: [PATCH 20/34] simple_intrusive_list: Add validation call --- src/simple_intrusive_list.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/simple_intrusive_list.h b/src/simple_intrusive_list.h index 40e5d86..a68d1a2 100644 --- a/src/simple_intrusive_list.h +++ b/src/simple_intrusive_list.h @@ -108,6 +108,17 @@ template class simple_intrusive_list { return _head; } + bool validate_tail() const { + if (_head == nullptr) { + return (_tail == &_head); + } + auto p = _head; + while (p->next != nullptr) { + p = p->next; + } + return _tail == &p->next; + } + private: // Data members value_ptr_type _head; From e763c29073bb14bc9f52eebe9b2fe497e3c38db9 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Fri, 21 Feb 2025 21:10:26 -0500 Subject: [PATCH 21/34] Expand _remove_events_for debug --- src/AsyncTCP.cpp | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff 
--git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index e80dd3d..49c9075 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -230,17 +230,29 @@ static inline lwip_tcp_event_packet_t *_get_async_event() { static bool _remove_events_for(AsyncClient *client) { queue_mutex_guard guard; if (guard) { - auto removed_events = _async_queue.remove_if([=](lwip_tcp_event_packet_t &pkt) { +#ifdef ASYNC_TCP_DEBUG + auto start_length = _async_queue.size(); +#endif + + auto removed_event_chain = _async_queue.remove_if([=](lwip_tcp_event_packet_t &pkt) { return pkt.client == client; }); + size_t count = 0; - while (removed_events) { + while (removed_event_chain) { ++count; - auto t = removed_events; - removed_events = removed_events->next; + auto t = removed_event_chain; + removed_event_chain = t->next; _free_event(t); } - DEBUG_PRINTF("Removed %d for 0x%08x", count, (intptr_t)client); + +#ifdef ASYNC_TCP_DEBUG + auto end_length = _async_queue.size(); + assert(count + end_length == start_length); + assert(_async_queue.validate_tail()); + + DEBUG_PRINTF("Removed %d/%d for 0x%08x", count, start_length, (intptr_t)client); +#endif }; return (bool)guard; }; From bbd42d0c5020ab2a5bdd15e6139ec0b3c20c63e5 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Mon, 24 Feb 2025 14:58:26 +0000 Subject: [PATCH 22/34] Clarify discard execution Ensure that the discard event is run once and exactly once for each client. 
--- src/AsyncTCP.cpp | 74 ++++++++++++++++++++++++++---------------------- src/AsyncTCP.h | 1 + 2 files changed, 41 insertions(+), 34 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 49c9075..3a5a5fb 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -285,15 +285,11 @@ static lwip_tcp_event_packet_t *_register_pcb(tcp_pcb *pcb, AsyncClient *client) static void _teardown_pcb(tcp_pcb *pcb) { assert(pcb); // Do teardown - auto old_arg = pcb->callback_arg; tcp_arg(pcb, NULL); tcp_sent(pcb, NULL); tcp_recv(pcb, NULL); tcp_err(pcb, NULL); tcp_poll(pcb, NULL, 0); - if (old_arg) { - _remove_events_for(reinterpret_cast(old_arg)); - } } void AsyncClient_detail::handle_async_event(lwip_tcp_event_packet_t *e) { @@ -472,7 +468,7 @@ static int8_t _tcp_sent(void *arg, struct tcp_pcb *pcb, uint16_t len) { } static void _tcp_error(void *arg, int8_t err) { - DEBUG_PRINTF("+E: 0x%08x", arg); + DEBUG_PRINTF("+E: 0x%08x %d", arg, err); AsyncClient *client = reinterpret_cast(arg); assert(client); // The associated pcb is now invalid and will soon be deallocated @@ -539,6 +535,7 @@ typedef struct { ip_addr_t *addr; uint16_t port; } bind; + AsyncClient *client; // used for close() and abort() uint8_t backlog; }; } tcp_api_call_t; @@ -585,29 +582,37 @@ static err_t _tcp_recved_api(struct tcpip_api_call_data *api_call_msg) { static err_t _tcp_close_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; msg->err = ERR_CONN; + if (msg->client) { + // Client has requested close; purge all events from queue + _remove_events_for(msg->client); + } if (pcb_is_active(*msg)) { _teardown_pcb(*msg->pcb_ptr); msg->err = tcp_close(*msg->pcb_ptr); - if (msg->err == ERR_OK) { - *msg->pcb_ptr = nullptr; + if (msg->err != ERR_OK) { + tcp_abort(*msg->pcb_ptr); } + *msg->pcb_ptr = nullptr; } return msg->err; } -static esp_err_t _tcp_close(tcp_pcb **pcb_ptr) { +static esp_err_t _tcp_close(tcp_pcb **pcb_ptr, AsyncClient *client) { if 
(!pcb_ptr || !*pcb_ptr) { return ERR_CONN; } tcp_api_call_t msg; msg.pcb_ptr = pcb_ptr; + msg.client = client; tcpip_api_call(_tcp_close_api, (struct tcpip_api_call_data *)&msg); return msg.err; } +// Sets *pcb_ptr to nullptr static err_t _tcp_abort_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; msg->err = ERR_CONN; + _remove_events_for(msg->client); if (pcb_is_active(*msg)) { _teardown_pcb(*msg->pcb_ptr); tcp_abort(*msg->pcb_ptr); @@ -616,12 +621,13 @@ static err_t _tcp_abort_api(struct tcpip_api_call_data *api_call_msg) { return msg->err; } -static esp_err_t _tcp_abort(tcp_pcb **pcb_ptr) { +static esp_err_t _tcp_abort(tcp_pcb **pcb_ptr, AsyncClient *client) { if (!pcb_ptr || !*pcb_ptr) { return ERR_CONN; } tcp_api_call_t msg; msg.pcb_ptr = pcb_ptr; + msg.client = client; tcpip_api_call(_tcp_abort_api, (struct tcpip_api_call_data *)&msg); assert(*pcb_ptr == nullptr); // must be true return msg.err; @@ -674,9 +680,9 @@ static tcp_pcb *_tcp_listen_with_backlog(tcp_pcb *pcb, uint8_t backlog) { */ AsyncClient::AsyncClient(tcp_pcb *pcb) - : _pcb(pcb), _end_event(nullptr), _connect_cb(0), _connect_cb_arg(0), _discard_cb(0), _discard_cb_arg(0), _sent_cb(0), _sent_cb_arg(0), _error_cb(0), - _error_cb_arg(0), _recv_cb(0), _recv_cb_arg(0), _pb_cb(0), _pb_cb_arg(0), _timeout_cb(0), _timeout_cb_arg(0), _ack_pcb(true), _tx_last_packet(0), - _rx_timeout(0), _rx_last_ack(0), _ack_timeout(CONFIG_ASYNC_TCP_MAX_ACK_TIME), _connect_port(0) { + : _pcb(pcb), _end_event(nullptr), _needs_discard(pcb != nullptr), _connect_cb(0), _connect_cb_arg(0), _discard_cb(0), _discard_cb_arg(0), _sent_cb(0), + _sent_cb_arg(0), _error_cb(0), _error_cb_arg(0), _recv_cb(0), _recv_cb_arg(0), _pb_cb(0), _pb_cb_arg(0), _timeout_cb(0), _timeout_cb_arg(0), _ack_pcb(true), + _tx_last_packet(0), _rx_timeout(0), _rx_last_ack(0), _ack_timeout(CONFIG_ASYNC_TCP_MAX_ACK_TIME), _connect_port(0) { if (_pcb) { _end_event = _register_pcb(_pcb, this); 
_rx_last_packet = millis(); @@ -686,19 +692,23 @@ AsyncClient::AsyncClient(tcp_pcb *pcb) // Swallow this PCB, producing a null client object tcp_abort(_pcb); _pcb = nullptr; + _needs_discard = false; } } - DEBUG_PRINTF("+AC: 0x%08x -> 0x%08x", _pcb, (intptr_t)this); + DEBUG_PRINTF("+AC: 0x%08x -> 0x%08x [0x%08x]", _pcb, (intptr_t)this, (intptr_t)_end_event); } AsyncClient::~AsyncClient() { + DEBUG_PRINTF("-AC: 0x%08x -> 0x%08x [0x%08x]", _pcb, (intptr_t)this, (intptr_t)_end_event); if (_pcb) { _close(); } if (_end_event) { _free_event(_end_event); } - DEBUG_PRINTF("-AC: 0x%08x -> 0x%08x", _pcb, (intptr_t)this); + assert(_needs_discard == false); // If we needed the discard callback, it must have been called by now + // We take care to clear this flag before calling the discard callback + // as the discard callback commonly destructs the AsyncClient object. } /* @@ -774,6 +784,7 @@ bool AsyncClient::_connect(const ip_addr_t &addr, uint16_t port) { _pcb = nullptr; return false; } + _needs_discard = true; TCP_MUTEX_UNLOCK(); tcp_api_call_t msg; @@ -847,7 +858,8 @@ void AsyncClient::close(bool now) { int8_t AsyncClient::abort() { if (_pcb) { - _tcp_abort(&_pcb); + _tcp_abort(&_pcb, this); + _needs_discard = false; } return ERR_ABRT; } @@ -920,17 +932,13 @@ void AsyncClient::ackPacket(struct pbuf *pb) { * */ int8_t AsyncClient::_close() { - DEBUG_PRINTF("close: 0x%08x", (uint32_t)this); - int8_t err = ERR_OK; - if (_pcb) { - // log_i(""); - err = _tcp_close(&_pcb); - if (err != ERR_OK) { - err = abort(); - } - if (_discard_cb) { - _discard_cb(_discard_cb_arg, this); - } + DEBUG_PRINTF("close: 0x%08x -> 0x%08x, %d", (uint32_t)this, _pcb, _needs_discard); + // We always call close, regardless of current state + int8_t err = _tcp_close(&_pcb, this); + auto call_discard = _needs_discard; + _needs_discard = false; + if (call_discard && _discard_cb) { + _discard_cb(_discard_cb_arg, this); } return err; } @@ -948,9 +956,11 @@ int8_t AsyncClient::_connected(int8_t err) { } void 
AsyncClient::_error(int8_t err) { + assert(_needs_discard); if (_error_cb) { _error_cb(_error_cb_arg, this, err); } + _needs_discard = false; if (_discard_cb) { _discard_cb(_discard_cb_arg, this); } @@ -1041,12 +1051,8 @@ void AsyncClient::_dns_found(struct ip_addr *ipaddr) { connect(ip, _connect_port); #endif } else { - if (_error_cb) { - _error_cb(_error_cb_arg, this, -55); - } - if (_discard_cb) { - _discard_cb(_discard_cb_arg, this); - } + // No address found + this->_error(-55); } } @@ -1416,7 +1422,7 @@ void AsyncServer::begin() { err = _tcp_bind(_pcb, &local_addr, _port); if (err != ERR_OK) { - _tcp_close(&_pcb); + _tcp_close(&_pcb, nullptr); log_e("bind error: %d", err); return; } @@ -1439,7 +1445,7 @@ void AsyncServer::end() { tcp_arg(_pcb, NULL); tcp_accept(_pcb, NULL); if (tcp_close(_pcb) != ERR_OK) { - _tcp_abort(&_pcb); + tcp_abort(_pcb); } _pcb = NULL; TCP_MUTEX_UNLOCK(); diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index d4157f4..d9cd0f7 100644 --- a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -249,6 +249,7 @@ class AsyncClient { protected: tcp_pcb *_pcb; lwip_tcp_event_packet_t *_end_event; + bool _needs_discard; AcConnectHandler _connect_cb; void *_connect_cb_arg; From 74dd111a9b4a240a9df76316339a840e7b62a018 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Fri, 21 Feb 2025 22:03:10 -0500 Subject: [PATCH 23/34] simple_intrusive_list: Add pragma once --- src/simple_intrusive_list.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/simple_intrusive_list.h b/src/simple_intrusive_list.h index a68d1a2..29d1c0f 100644 --- a/src/simple_intrusive_list.h +++ b/src/simple_intrusive_list.h @@ -1,5 +1,7 @@ // Simple intrusive list class +#pragma once + template class simple_intrusive_list { static_assert(std::is_same().next), T *>::value, "Template type must have public 'T* next' member"); From 6de4a1d828a5b9097319bda481639bea27b62dfa Mon Sep 17 00:00:00 2001 From: Will Miles Date: Mon, 24 Feb 2025 15:12:58 +0000 Subject: [PATCH 24/34] Teardown pcb on 
invalidate It's unclear why this might be necessary - LwIP oughtn't be triggering new callbacks after the error callback; but just to be on the safe side, make sure we have no dangling references in the pcb we are releasing. Related to the changes in #31. --- src/AsyncTCP.cpp | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 3a5a5fb..cf99fc0 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -257,18 +257,6 @@ static bool _remove_events_for(AsyncClient *client) { return (bool)guard; }; -// Detail class for interacting with AsyncClient internals, but without exposing the API to other parts of the program -class AsyncClient_detail { -public: - static inline lwip_tcp_event_packet_t *invalidate_pcb(AsyncClient &client) { - auto end_event = client._end_event; - client._pcb = nullptr; - client._end_event = nullptr; - return end_event; - }; - static void __attribute__((visibility("internal"))) handle_async_event(lwip_tcp_event_packet_t *event); -}; - static lwip_tcp_event_packet_t *_register_pcb(tcp_pcb *pcb, AsyncClient *client) { // do client-specific setup auto end_event = _alloc_event(LWIP_TCP_ERROR, client, pcb); @@ -292,6 +280,19 @@ static void _teardown_pcb(tcp_pcb *pcb) { tcp_poll(pcb, NULL, 0); } +// Detail class for interacting with AsyncClient internals, but without exposing the API to other parts of the program +class AsyncClient_detail { +public: + static inline lwip_tcp_event_packet_t *invalidate_pcb(AsyncClient &client) { + auto end_event = client._end_event; + _teardown_pcb(client._pcb); + client._pcb = nullptr; + client._end_event = nullptr; + return end_event; + }; + static void __attribute__((visibility("internal"))) handle_async_event(lwip_tcp_event_packet_t *event); +}; + void AsyncClient_detail::handle_async_event(lwip_tcp_event_packet_t *e) { // Special cases first if (e->event == LWIP_TCP_ERROR) { From 570d70e6669fc47f040e77b0e03970d15baf9fbc Mon Sep 17 
00:00:00 2001 From: Will Miles Date: Mon, 24 Feb 2025 16:07:31 +0000 Subject: [PATCH 25/34] Use guard object for LwIP locking The existing TCP_MUTEX_(UN)LOCK macros are not reentrant nor safe to call on code that might get run on the LwIP thread. Convert to a C++ lock_guard-style object, which is safer, cleans up some code paths, and correctly supports AsyncClient's constructor. --- src/AsyncTCP.cpp | 84 ++++++++++++++++++++++++++-------------------- 1 file changed, 49 insertions(+), 35 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index cf99fc0..c169ba1 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -12,6 +12,8 @@ extern "C" { #include "lwip/inet.h" #include "lwip/opt.h" #include "lwip/tcp.h" +#include "lwip/tcpip.h" +#include "lwip/sys.h" } #if CONFIG_ASYNC_TCP_USE_WDT @@ -30,20 +32,29 @@ extern "C" { #define TAG "AsyncTCP" // https://github.com/espressif/arduino-esp32/issues/10526 +namespace { #ifdef CONFIG_LWIP_TCPIP_CORE_LOCKING -#define TCP_MUTEX_LOCK() \ - if (!sys_thread_tcpip(LWIP_CORE_LOCK_QUERY_HOLDER)) { \ - LOCK_TCPIP_CORE(); \ +struct tcp_core_guard { + bool do_lock; + inline tcp_core_guard() : do_lock(!sys_thread_tcpip(LWIP_CORE_LOCK_QUERY_HOLDER)) { + if (do_lock) { + LOCK_TCPIP_CORE(); + } } - -#define TCP_MUTEX_UNLOCK() \ - if (sys_thread_tcpip(LWIP_CORE_LOCK_QUERY_HOLDER)) { \ - UNLOCK_TCPIP_CORE(); \ + inline ~tcp_core_guard() { + if (do_lock) { + UNLOCK_TCPIP_CORE(); + } } -#else // CONFIG_LWIP_TCPIP_CORE_LOCKING -#define TCP_MUTEX_LOCK() -#define TCP_MUTEX_UNLOCK() + tcp_core_guard(const tcp_core_guard &) = delete; + tcp_core_guard(tcp_core_guard &&) = delete; + tcp_core_guard &operator=(const tcp_core_guard &) = delete; + tcp_core_guard &operator=(tcp_core_guard &&) = delete; +}; +#else // CONFIG_LWIP_TCPIP_CORE_LOCKING +struct tcp_core_guard {}; #endif // CONFIG_LWIP_TCPIP_CORE_LOCKING +} // anonymous namespace /* TCP poll interval is specified in terms of the TCP coarse timer interval, which is called twice a
second @@ -685,6 +696,7 @@ AsyncClient::AsyncClient(tcp_pcb *pcb) _sent_cb_arg(0), _error_cb(0), _error_cb_arg(0), _recv_cb(0), _recv_cb_arg(0), _pb_cb(0), _pb_cb_arg(0), _timeout_cb(0), _timeout_cb_arg(0), _ack_pcb(true), _tx_last_packet(0), _rx_timeout(0), _rx_last_ack(0), _ack_timeout(CONFIG_ASYNC_TCP_MAX_ACK_TIME), _connect_port(0) { if (_pcb) { + tcp_core_guard tg; _end_event = _register_pcb(_pcb, this); _rx_last_packet = millis(); if (!_end_event) { @@ -770,23 +782,23 @@ bool AsyncClient::_connect(const ip_addr_t &addr, uint16_t port) { return false; } - TCP_MUTEX_LOCK(); - _pcb = tcp_new_ip_type(addr.type); - if (!_pcb) { - TCP_MUTEX_UNLOCK(); - log_e("pcb == NULL"); - return false; - } - _end_event = _register_pcb(_pcb, this); + { + tcp_core_guard tg; + _pcb = tcp_new_ip_type(addr.type); + if (!_pcb) { + log_e("pcb == NULL"); + return false; + } + _end_event = _register_pcb(_pcb, this); - if (!_end_event) { - log_e("Unable to allocate event"); - tcp_abort(_pcb); - _pcb = nullptr; - return false; + if (!_end_event) { + log_e("Unable to allocate event"); + tcp_abort(_pcb); + _pcb = nullptr; + return false; + } + _needs_discard = true; } - _needs_discard = true; - TCP_MUTEX_UNLOCK(); tcp_api_call_t msg; msg.pcb_ptr = &_pcb; @@ -826,9 +838,12 @@ bool AsyncClient::connect(const char *host, uint16_t port) { return false; } - TCP_MUTEX_LOCK(); - err_t err = dns_gethostbyname(host, &addr, (dns_found_callback)&_tcp_dns_found, this); - TCP_MUTEX_UNLOCK(); + err_t err; + { + tcp_core_guard tg; + err = dns_gethostbyname(host, &addr, (dns_found_callback)&_tcp_dns_found, this); + } + if (err == ERR_OK) { #if ESP_IDF_VERSION_MAJOR < 5 #if LWIP_IPV6 @@ -1400,9 +1415,10 @@ void AsyncServer::begin() { return; } int8_t err; - TCP_MUTEX_LOCK(); - _pcb = tcp_new_ip_type(_bind4 && _bind6 ? IPADDR_TYPE_ANY : (_bind6 ? IPADDR_TYPE_V6 : IPADDR_TYPE_V4)); - TCP_MUTEX_UNLOCK(); + { + tcp_core_guard tg; + _pcb = tcp_new_ip_type(_bind4 && _bind6 ? IPADDR_TYPE_ANY : (_bind6 ? 
IPADDR_TYPE_V6 : IPADDR_TYPE_V4)); + }; if (!_pcb) { log_e("_pcb == NULL"); return; @@ -1434,22 +1450,20 @@ void AsyncServer::begin() { log_e("listen_pcb == NULL"); return; } - TCP_MUTEX_LOCK(); + tcp_core_guard tg; tcp_arg(_pcb, (void *)this); tcp_accept(_pcb, &_s_accept); - TCP_MUTEX_UNLOCK(); } void AsyncServer::end() { if (_pcb) { - TCP_MUTEX_LOCK(); + tcp_core_guard tg; tcp_arg(_pcb, NULL); tcp_accept(_pcb, NULL); if (tcp_close(_pcb) != ERR_OK) { tcp_abort(_pcb); } _pcb = NULL; - TCP_MUTEX_UNLOCK(); } } From c8efcffa1a24378d8ba6090626823fda93177127 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Mon, 24 Feb 2025 16:08:15 +0000 Subject: [PATCH 26/34] Move TCP callbacks in to AsyncClient_detail This gives access to AsyncClient internals, preparing for O(1) coalescing. --- src/AsyncTCP.cpp | 54 +++++++++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index c169ba1..5a0a754 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -118,11 +118,18 @@ struct lwip_tcp_event_packet_t { }; }; -// Forward declarations for TCP event callbacks -static int8_t _tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf *pb, int8_t err); -static int8_t _tcp_sent(void *arg, struct tcp_pcb *pcb, uint16_t len); -static void _tcp_error(void *arg, int8_t err); -static int8_t _tcp_poll(void *arg, struct tcp_pcb *pcb); +// Detail class for interacting with AsyncClient internals, but without exposing the API to other parts of the program +class AsyncClient_detail { +public: + static inline lwip_tcp_event_packet_t *invalidate_pcb(AsyncClient &client); + static void __attribute__((visibility("internal"))) handle_async_event(lwip_tcp_event_packet_t *event); + + // TCP event callbacks + static int8_t __attribute__((visibility("internal"))) tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf *pb, int8_t err); + static int8_t __attribute__((visibility("internal"))) tcp_sent(void *arg, struct tcp_pcb *pcb, 
uint16_t len); + static void __attribute__((visibility("internal"))) tcp_error(void *arg, int8_t err); + static int8_t __attribute__((visibility("internal"))) tcp_poll(void *arg, struct tcp_pcb *pcb); +}; // helper function static lwip_tcp_event_packet_t *_alloc_event(lwip_tcp_event_t event, AsyncClient *client, tcp_pcb *pcb) { @@ -131,7 +138,7 @@ static lwip_tcp_event_packet_t *_alloc_event(lwip_tcp_event_t event, AsyncClient // Client structure is corrupt? log_e("Client mismatch allocating event for 0x%08x 0x%08x vs 0x%08x", (intptr_t)client, (intptr_t)pcb, client->pcb()); tcp_abort(pcb); - _tcp_error(client, ERR_ARG); + AsyncClient_detail::tcp_error(client, ERR_ARG); return nullptr; } @@ -143,7 +150,7 @@ static lwip_tcp_event_packet_t *_alloc_event(lwip_tcp_event_t event, AsyncClient if (pcb) { tcp_abort(pcb); } - _tcp_error(client, ERR_MEM); + AsyncClient_detail::tcp_error(client, ERR_MEM); return nullptr; } @@ -273,10 +280,10 @@ static lwip_tcp_event_packet_t *_register_pcb(tcp_pcb *pcb, AsyncClient *client) auto end_event = _alloc_event(LWIP_TCP_ERROR, client, pcb); if (end_event) { tcp_arg(pcb, client); - tcp_recv(pcb, &_tcp_recv); - tcp_sent(pcb, &_tcp_sent); - tcp_err(pcb, &_tcp_error); - tcp_poll(pcb, &_tcp_poll, CONFIG_ASYNC_TCP_POLL_TIMER); + tcp_recv(pcb, &AsyncClient_detail::tcp_recv); + tcp_sent(pcb, &AsyncClient_detail::tcp_sent); + tcp_err(pcb, &AsyncClient_detail::tcp_error); + tcp_poll(pcb, &AsyncClient_detail::tcp_poll, CONFIG_ASYNC_TCP_POLL_TIMER); }; return end_event; } @@ -291,17 +298,12 @@ static void _teardown_pcb(tcp_pcb *pcb) { tcp_poll(pcb, NULL, 0); } -// Detail class for interacting with AsyncClient internals, but without exposing the API to other parts of the program -class AsyncClient_detail { -public: - static inline lwip_tcp_event_packet_t *invalidate_pcb(AsyncClient &client) { - auto end_event = client._end_event; - _teardown_pcb(client._pcb); - client._pcb = nullptr; - client._end_event = nullptr; - return end_event; - }; - 
static void __attribute__((visibility("internal"))) handle_async_event(lwip_tcp_event_packet_t *event); +inline lwip_tcp_event_packet_t *AsyncClient_detail::invalidate_pcb(AsyncClient &client) { + auto end_event = client._end_event; + _teardown_pcb(client._pcb); + client._pcb = nullptr; + client._end_event = nullptr; + return end_event; }; void AsyncClient_detail::handle_async_event(lwip_tcp_event_packet_t *e) { @@ -435,7 +437,7 @@ static int8_t _tcp_connected(void *arg, tcp_pcb *pcb, int8_t err) { return ERR_OK; } -static int8_t _tcp_poll(void *arg, struct tcp_pcb *pcb) { +int8_t AsyncClient_detail::tcp_poll(void *arg, struct tcp_pcb *pcb) { DEBUG_PRINTF("+P: 0x%08x", pcb); AsyncClient *client = reinterpret_cast(arg); lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_POLL, client, pcb); @@ -446,7 +448,7 @@ static int8_t _tcp_poll(void *arg, struct tcp_pcb *pcb) { return ERR_OK; } -static int8_t _tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf *pb, int8_t err) { +int8_t AsyncClient_detail::tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf *pb, int8_t err) { AsyncClient *client = reinterpret_cast(arg); lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_RECV, client, pcb); if (e == nullptr) { @@ -467,7 +469,7 @@ static int8_t _tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf *pb, int8_t return ERR_OK; } -static int8_t _tcp_sent(void *arg, struct tcp_pcb *pcb, uint16_t len) { +int8_t AsyncClient_detail::tcp_sent(void *arg, struct tcp_pcb *pcb, uint16_t len) { DEBUG_PRINTF("+S: 0x%08x", pcb); AsyncClient *client = reinterpret_cast(arg); lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_SENT, client, pcb); @@ -479,7 +481,7 @@ static int8_t _tcp_sent(void *arg, struct tcp_pcb *pcb, uint16_t len) { return ERR_OK; } -static void _tcp_error(void *arg, int8_t err) { +void AsyncClient_detail::tcp_error(void *arg, int8_t err) { DEBUG_PRINTF("+E: 0x%08x %d", arg, err); AsyncClient *client = reinterpret_cast(arg); assert(client); From 
c424ea3016e2817138df046a16651f8638ca6f2a Mon Sep 17 00:00:00 2001 From: Will Miles Date: Mon, 24 Feb 2025 16:08:15 +0000 Subject: [PATCH 27/34] Raise queue mutex guard scope to callbacks This is preparing to expand the guard scope to include other client object members for event coaelscense. --- src/AsyncTCP.cpp | 87 +++++++++++++++++++++++++----------------------- 1 file changed, 45 insertions(+), 42 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 5a0a754..622dd34 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -202,37 +202,29 @@ static inline bool _init_async_event_queue() { return true; } -static inline bool _send_async_event(lwip_tcp_event_packet_t *e) { +static inline void _send_async_event(lwip_tcp_event_packet_t *e) { assert(e != nullptr); - queue_mutex_guard guard; - if (guard) { - _async_queue.push_back(e); + _async_queue.push_back(e); #ifdef ASYNC_TCP_DEBUG - uint32_t n; - xTaskNotifyAndQuery(_async_service_task_handle, 1, eIncrement, &n); - DEBUG_PRINTF("0x%08x", (intptr_t)e); + uint32_t n; + xTaskNotifyAndQuery(_async_service_task_handle, 1, eIncrement, &n); + DEBUG_PRINTF("0x%08x", (intptr_t)e); #else - xTaskNotifyGive(_async_service_task_handle); + xTaskNotifyGive(_async_service_task_handle); #endif - } - return (bool)guard; } -static inline bool _prepend_async_event(lwip_tcp_event_packet_t *e) { +static inline void _prepend_async_event(lwip_tcp_event_packet_t *e) { assert(e != nullptr); - queue_mutex_guard guard; - if (guard) { - _async_queue.push_front(e); + _async_queue.push_front(e); #ifdef ASYNC_TCP_DEBUG - uint32_t n; - xTaskNotifyAndQuery(_async_service_task_handle, 1, eIncrement, &n); - DEBUG_PRINTF("0x%08x", (intptr_t)e); + uint32_t n; + xTaskNotifyAndQuery(_async_service_task_handle, 1, eIncrement, &n); + DEBUG_PRINTF("0x%08x", (intptr_t)e); #else - xTaskNotifyGive(_async_service_task_handle); + xTaskNotifyGive(_async_service_task_handle); #endif - } - return (bool)guard; } static inline lwip_tcp_event_packet_t 
*_get_async_event() { @@ -245,34 +237,30 @@ static inline lwip_tcp_event_packet_t *_get_async_event() { return e; } -static bool _remove_events_for(AsyncClient *client) { - queue_mutex_guard guard; - if (guard) { +static void _remove_events_for(AsyncClient *client) { #ifdef ASYNC_TCP_DEBUG - auto start_length = _async_queue.size(); + auto start_length = _async_queue.size(); #endif - auto removed_event_chain = _async_queue.remove_if([=](lwip_tcp_event_packet_t &pkt) { - return pkt.client == client; - }); + auto removed_event_chain = _async_queue.remove_if([=](lwip_tcp_event_packet_t &pkt) { + return pkt.client == client; + }); - size_t count = 0; - while (removed_event_chain) { - ++count; - auto t = removed_event_chain; - removed_event_chain = t->next; - _free_event(t); - } + size_t count = 0; + while (removed_event_chain) { + ++count; + auto t = removed_event_chain; + removed_event_chain = t->next; + _free_event(t); + } #ifdef ASYNC_TCP_DEBUG - auto end_length = _async_queue.size(); - assert(count + end_length == start_length); - assert(_async_queue.validate_tail()); + auto end_length = _async_queue.size(); + assert(count + end_length == start_length); + assert(_async_queue.validate_tail()); - DEBUG_PRINTF("Removed %d/%d for 0x%08x", count, start_length, (intptr_t)client); + DEBUG_PRINTF("Removed %d/%d for 0x%08x", count, start_length, (intptr_t)client); #endif - }; - return (bool)guard; }; static lwip_tcp_event_packet_t *_register_pcb(tcp_pcb *pcb, AsyncClient *client) { @@ -433,6 +421,7 @@ static int8_t _tcp_connected(void *arg, tcp_pcb *pcb, int8_t err) { return ERR_MEM; } e->connected.err = err; + queue_mutex_guard guard; _send_async_event(e); return ERR_OK; } @@ -444,6 +433,8 @@ int8_t AsyncClient_detail::tcp_poll(void *arg, struct tcp_pcb *pcb) { if (e == nullptr) { return ERR_MEM; } + + queue_mutex_guard guard; _send_async_event(e); return ERR_OK; } @@ -464,6 +455,8 @@ int8_t AsyncClient_detail::tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf e->event = 
LWIP_TCP_FIN; e->fin.err = err; } + + queue_mutex_guard guard; _send_async_event(e); return ERR_OK; @@ -476,7 +469,8 @@ int8_t AsyncClient_detail::tcp_sent(void *arg, struct tcp_pcb *pcb, uint16_t len if (e == nullptr) { return ERR_MEM; } - e->sent.len = len; + + queue_mutex_guard guard; _send_async_event(e); return ERR_OK; } @@ -490,6 +484,7 @@ void AsyncClient_detail::tcp_error(void *arg, int8_t err) { lwip_tcp_event_packet_t *e = AsyncClient_detail::invalidate_pcb(*client); if (e) { e->error.err = err; + queue_mutex_guard guard; _remove_events_for(client); // FUTURE: we could hold the lock the whole time _prepend_async_event(e); } else { @@ -508,6 +503,8 @@ static void _tcp_dns_found(const char *name, struct ip_addr *ipaddr, void *arg) } else { memset(&e->dns.addr, 0, sizeof(e->dns.addr)); } + + queue_mutex_guard guard; _send_async_event(e); } } @@ -519,6 +516,8 @@ static int8_t _tcp_accept(AsyncServer *server, AsyncClient *client) { return ERR_MEM; } e->accept.server = server; + + queue_mutex_guard guard; _send_async_event(e); return ERR_OK; } @@ -598,6 +597,7 @@ static err_t _tcp_close_api(struct tcpip_api_call_data *api_call_msg) { msg->err = ERR_CONN; if (msg->client) { // Client has requested close; purge all events from queue + queue_mutex_guard guard; _remove_events_for(msg->client); } if (pcb_is_active(*msg)) { @@ -626,7 +626,10 @@ static esp_err_t _tcp_close(tcp_pcb **pcb_ptr, AsyncClient *client) { static err_t _tcp_abort_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; msg->err = ERR_CONN; - _remove_events_for(msg->client); + { + queue_mutex_guard guard; + _remove_events_for(msg->client); + } if (pcb_is_active(*msg)) { _teardown_pcb(*msg->pcb_ptr); tcp_abort(*msg->pcb_ptr); From 8926f30b2b90a57a79471169de527870c21215de Mon Sep 17 00:00:00 2001 From: Will Miles Date: Mon, 24 Feb 2025 16:08:15 +0000 Subject: [PATCH 28/34] Coalesce poll, recv, and sent events If one of these events is pending on a 
client, subsequent callbacks can be safely rolled together in to one event. --- src/AsyncTCP.cpp | 69 +++++++++++++++++++++++++++++++++++++----------- src/AsyncTCP.h | 3 +++ 2 files changed, 57 insertions(+), 15 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 622dd34..cacf25f 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -99,10 +99,6 @@ struct lwip_tcp_event_packet_t { int8_t err; } error; struct { - uint16_t len; - } sent; - struct { - pbuf *pb; int8_t err; } recv; struct { @@ -163,10 +159,6 @@ static lwip_tcp_event_packet_t *_alloc_event(lwip_tcp_event_t event, AsyncClient static void _free_event(lwip_tcp_event_packet_t *evpkt) { DEBUG_PRINTF("_FE: 0x%08x -> %d 0x%08x [0x%08x]", (intptr_t)evpkt, (int)evpkt->event, (intptr_t)evpkt->client, (intptr_t)evpkt->next); - if ((evpkt->event == LWIP_TCP_RECV) && (evpkt->recv.pb != nullptr)) { - // We must free the packet buffer - pbuf_free(evpkt->recv.pb); - } delete evpkt; } @@ -321,17 +313,31 @@ void AsyncClient_detail::handle_async_event(lwip_tcp_event_packet_t *e) { // TODO: is a switch-case more code efficient? 
else if (e->event == LWIP_TCP_RECV) { DEBUG_PRINTF("-R: 0x%08x", e->client->_pcb); - e->client->_recv(e->recv.pb, e->recv.err); - e->recv.pb = nullptr; // client has taken responsibility for freeing it + struct pbuf *pb; + { + queue_mutex_guard guard; + pb = e->client->_recv_pending; + e->client->_recv_pending = nullptr; + } + e->client->_recv(pb, e->recv.err); } else if (e->event == LWIP_TCP_FIN) { DEBUG_PRINTF("-F: 0x%08x", e->client->_pcb); e->client->_fin(e->fin.err); } else if (e->event == LWIP_TCP_SENT) { DEBUG_PRINTF("-S: 0x%08x", e->client->_pcb); - e->client->_sent(e->sent.len); + uint16_t sent; + { + queue_mutex_guard guard; + sent = e->client->_sent_pending; + e->client->_sent_pending = 0; + } + e->client->_sent(sent); } else if (e->event == LWIP_TCP_POLL) { DEBUG_PRINTF("-P: 0x%08x", e->client->_pcb); e->client->_poll(); + // Clear poll pending + queue_mutex_guard guard; + e->client->_polls_pending = 0; } else if (e->event == LWIP_TCP_CONNECTED) { DEBUG_PRINTF("-C: 0x%08x 0x%08x %d", e->client, e->client->_pcb, e->connected.err); e->client->_connected(e->connected.err); @@ -429,6 +435,16 @@ static int8_t _tcp_connected(void *arg, tcp_pcb *pcb, int8_t err) { int8_t AsyncClient_detail::tcp_poll(void *arg, struct tcp_pcb *pcb) { DEBUG_PRINTF("+P: 0x%08x", pcb); AsyncClient *client = reinterpret_cast(arg); + + // Coalesce event, if possible + { + queue_mutex_guard guard; + if (client->_polls_pending) { + ++client->_polls_pending; + return ERR_OK; + } + } + lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_POLL, client, pcb); if (e == nullptr) { return ERR_MEM; @@ -441,6 +457,16 @@ int8_t AsyncClient_detail::tcp_poll(void *arg, struct tcp_pcb *pcb) { int8_t AsyncClient_detail::tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf *pb, int8_t err) { AsyncClient *client = reinterpret_cast(arg); + + // Coalesce event, if possible + if (pb) { + queue_mutex_guard guard; + if (client->_recv_pending) { + pbuf_cat(client->_recv_pending, pb); + return ERR_OK; + } + } 
+ lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_RECV, client, pcb); if (e == nullptr) { return ERR_MEM; @@ -448,7 +474,6 @@ int8_t AsyncClient_detail::tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf if (pb) { DEBUG_PRINTF("+R: 0x%08x", pcb); - e->recv.pb = pb; e->recv.err = err; } else { DEBUG_PRINTF("+F: 0x%08x -> 0x%08x", pcb, arg); @@ -465,6 +490,16 @@ int8_t AsyncClient_detail::tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf int8_t AsyncClient_detail::tcp_sent(void *arg, struct tcp_pcb *pcb, uint16_t len) { DEBUG_PRINTF("+S: 0x%08x", pcb); AsyncClient *client = reinterpret_cast(arg); + + // Coalesce event, if possible + { + queue_mutex_guard guard; + if (client->_sent_pending) { + client->_sent_pending += len; + return ERR_OK; + } + } + lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_SENT, client, pcb); if (e == nullptr) { return ERR_MEM; @@ -697,9 +732,10 @@ static tcp_pcb *_tcp_listen_with_backlog(tcp_pcb *pcb, uint8_t backlog) { */ AsyncClient::AsyncClient(tcp_pcb *pcb) - : _pcb(pcb), _end_event(nullptr), _needs_discard(pcb != nullptr), _connect_cb(0), _connect_cb_arg(0), _discard_cb(0), _discard_cb_arg(0), _sent_cb(0), - _sent_cb_arg(0), _error_cb(0), _error_cb_arg(0), _recv_cb(0), _recv_cb_arg(0), _pb_cb(0), _pb_cb_arg(0), _timeout_cb(0), _timeout_cb_arg(0), _ack_pcb(true), - _tx_last_packet(0), _rx_timeout(0), _rx_last_ack(0), _ack_timeout(CONFIG_ASYNC_TCP_MAX_ACK_TIME), _connect_port(0) { + : _pcb(pcb), _end_event(nullptr), _needs_discard(pcb != nullptr), _polls_pending(0), _recv_pending(nullptr), _sent_pending(0), _connect_cb(0), + _connect_cb_arg(0), _discard_cb(0), _discard_cb_arg(0), _sent_cb(0), _sent_cb_arg(0), _error_cb(0), _error_cb_arg(0), _recv_cb(0), _recv_cb_arg(0), + _pb_cb(0), _pb_cb_arg(0), _timeout_cb(0), _timeout_cb_arg(0), _ack_pcb(true), _tx_last_packet(0), _rx_timeout(0), _rx_last_ack(0), + _ack_timeout(CONFIG_ASYNC_TCP_MAX_ACK_TIME), _connect_port(0) { if (_pcb) { tcp_core_guard tg; _end_event = _register_pcb(_pcb, 
this); @@ -724,6 +760,9 @@ AsyncClient::~AsyncClient() { if (_end_event) { _free_event(_end_event); } + if (_recv_pending) { + pbuf_free(_recv_pending); + } assert(_needs_discard == false); // If we needed the discard callback, it must have been called by now // We take care to clear this flag before calling the discard callback // as the discard callback commonly destructs the AsyncClient object. diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index d9cd0f7..a26990b 100644 --- a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -250,6 +250,9 @@ class AsyncClient { tcp_pcb *_pcb; lwip_tcp_event_packet_t *_end_event; bool _needs_discard; + unsigned _polls_pending; + struct pbuf *_recv_pending; + uint16_t _sent_pending; AcConnectHandler _connect_cb; void *_connect_cb_arg; From 97ca3f83704f2af5b07a92aba6386211bc7b8332 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Mon, 24 Feb 2025 16:08:15 +0000 Subject: [PATCH 29/34] Refactor to minimize queue locking --- src/AsyncTCP.cpp | 134 +++++++++++++++++++++++++++-------------------- 1 file changed, 77 insertions(+), 57 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index cacf25f..7f252d5 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -99,6 +99,10 @@ struct lwip_tcp_event_packet_t { int8_t err; } error; struct { + uint16_t len; + } sent; + struct { + pbuf *pb; int8_t err; } recv; struct { @@ -118,6 +122,7 @@ struct lwip_tcp_event_packet_t { class AsyncClient_detail { public: static inline lwip_tcp_event_packet_t *invalidate_pcb(AsyncClient &client); + static inline lwip_tcp_event_packet_t *get_async_event(); static void __attribute__((visibility("internal"))) handle_async_event(lwip_tcp_event_packet_t *event); // TCP event callbacks @@ -159,6 +164,9 @@ static lwip_tcp_event_packet_t *_alloc_event(lwip_tcp_event_t event, AsyncClient static void _free_event(lwip_tcp_event_packet_t *evpkt) { DEBUG_PRINTF("_FE: 0x%08x -> %d 0x%08x [0x%08x]", (intptr_t)evpkt, (int)evpkt->event, (intptr_t)evpkt->client, 
(intptr_t)evpkt->next); + if ((evpkt->event == LWIP_TCP_RECV) && (evpkt->recv.pb != nullptr)) { + pbuf_free(evpkt->recv.pb); + } delete evpkt; } @@ -219,16 +227,6 @@ static inline void _prepend_async_event(lwip_tcp_event_packet_t *e) { #endif } -static inline lwip_tcp_event_packet_t *_get_async_event() { - queue_mutex_guard guard; - lwip_tcp_event_packet_t *e = nullptr; - if (guard) { - e = _async_queue.pop_front(); - } - DEBUG_PRINTF("0x%08x", (intptr_t)e); - return e; -} - static void _remove_events_for(AsyncClient *client) { #ifdef ASYNC_TCP_DEBUG auto start_length = _async_queue.size(); @@ -286,6 +284,33 @@ inline lwip_tcp_event_packet_t *AsyncClient_detail::invalidate_pcb(AsyncClient & return end_event; }; +inline lwip_tcp_event_packet_t *AsyncClient_detail::get_async_event() { + queue_mutex_guard guard; + lwip_tcp_event_packet_t *e = nullptr; + if (guard) { + e = _async_queue.pop_front(); + // special case: override values + if (e) { + switch (e->event) { + case LWIP_TCP_RECV: + if ((e->recv.err == 0) && (e->recv.pb == nullptr)) { + e->recv.pb = e->client->_recv_pending; + e->client->_recv_pending = nullptr; + } + break; + case LWIP_TCP_SENT: + e->sent.len = e->client->_sent_pending; + e->client->_sent_pending = 0; + break; + case LWIP_TCP_POLL: e->client->_polls_pending = 0; break; + default: break; + } + } + } + DEBUG_PRINTF("0x%08x", (intptr_t)e); + return e; +} + void AsyncClient_detail::handle_async_event(lwip_tcp_event_packet_t *e) { // Special cases first if (e->event == LWIP_TCP_ERROR) { @@ -313,31 +338,17 @@ void AsyncClient_detail::handle_async_event(lwip_tcp_event_packet_t *e) { // TODO: is a switch-case more code efficient? 
else if (e->event == LWIP_TCP_RECV) { DEBUG_PRINTF("-R: 0x%08x", e->client->_pcb); - struct pbuf *pb; - { - queue_mutex_guard guard; - pb = e->client->_recv_pending; - e->client->_recv_pending = nullptr; - } - e->client->_recv(pb, e->recv.err); + e->client->_recv(e->recv.pb, e->recv.err); + e->recv.pb = nullptr; // Consumed by client } else if (e->event == LWIP_TCP_FIN) { DEBUG_PRINTF("-F: 0x%08x", e->client->_pcb); e->client->_fin(e->fin.err); } else if (e->event == LWIP_TCP_SENT) { DEBUG_PRINTF("-S: 0x%08x", e->client->_pcb); - uint16_t sent; - { - queue_mutex_guard guard; - sent = e->client->_sent_pending; - e->client->_sent_pending = 0; - } - e->client->_sent(sent); + e->client->_sent(e->sent.len); } else if (e->event == LWIP_TCP_POLL) { DEBUG_PRINTF("-P: 0x%08x", e->client->_pcb); e->client->_poll(); - // Clear poll pending - queue_mutex_guard guard; - e->client->_polls_pending = 0; } else if (e->event == LWIP_TCP_CONNECTED) { DEBUG_PRINTF("-C: 0x%08x 0x%08x %d", e->client, e->client->_pcb, e->connected.err); e->client->_connected(e->connected.err); @@ -355,7 +366,7 @@ static void _async_service_task(void *pvParameters) { } #endif for (;;) { - while (auto packet = _get_async_event()) { + while (auto packet = AsyncClient_detail::get_async_event()) { AsyncClient_detail::handle_async_event(packet); #if CONFIG_ASYNC_TCP_USE_WDT esp_task_wdt_reset(); @@ -437,12 +448,10 @@ int8_t AsyncClient_detail::tcp_poll(void *arg, struct tcp_pcb *pcb) { AsyncClient *client = reinterpret_cast(arg); // Coalesce event, if possible - { - queue_mutex_guard guard; - if (client->_polls_pending) { - ++client->_polls_pending; - return ERR_OK; - } + queue_mutex_guard guard; + if (client->_polls_pending) { + ++client->_polls_pending; + return ERR_OK; } lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_POLL, client, pcb); @@ -450,7 +459,7 @@ int8_t AsyncClient_detail::tcp_poll(void *arg, struct tcp_pcb *pcb) { return ERR_MEM; } - queue_mutex_guard guard; + assert(client->_polls_pending == 
0); _send_async_event(e); return ERR_OK; } @@ -458,32 +467,44 @@ int8_t AsyncClient_detail::tcp_poll(void *arg, struct tcp_pcb *pcb) { int8_t AsyncClient_detail::tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf *pb, int8_t err) { AsyncClient *client = reinterpret_cast(arg); - // Coalesce event, if possible if (pb) { + DEBUG_PRINTF("+R: 0x%08x", pcb); queue_mutex_guard guard; - if (client->_recv_pending) { - pbuf_cat(client->_recv_pending, pb); - return ERR_OK; + + // Coalesce event, if possible + if (err == 0) { + if (client->_recv_pending) { + pbuf_cat(client->_recv_pending, pb); + return ERR_OK; + } } - } - lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_RECV, client, pcb); - if (e == nullptr) { - return ERR_MEM; - } + lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_RECV, client, pcb); + if (e == nullptr) { + pbuf_free(pb); + return ERR_MEM; // TODO - error handling??? + } - if (pb) { - DEBUG_PRINTF("+R: 0x%08x", pcb); + if (err == 0) { + client->_recv_pending = pb; + e->recv.pb = nullptr; + } else { + e->recv.pb = pb; + } e->recv.err = err; + _send_async_event(e); } else { DEBUG_PRINTF("+F: 0x%08x -> 0x%08x", pcb, arg); - e->event = LWIP_TCP_FIN; + queue_mutex_guard guard; + lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_FIN, client, pcb); + if (e == nullptr) { + return ERR_MEM; + } + e->fin.err = err; + _send_async_event(e); } - queue_mutex_guard guard; - _send_async_event(e); - return ERR_OK; } @@ -492,12 +513,10 @@ int8_t AsyncClient_detail::tcp_sent(void *arg, struct tcp_pcb *pcb, uint16_t len AsyncClient *client = reinterpret_cast(arg); // Coalesce event, if possible - { - queue_mutex_guard guard; - if (client->_sent_pending) { - client->_sent_pending += len; - return ERR_OK; - } + queue_mutex_guard guard; + if (client->_sent_pending) { + client->_sent_pending += len; + return ERR_OK; } lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_SENT, client, pcb); @@ -505,7 +524,8 @@ int8_t AsyncClient_detail::tcp_sent(void *arg, struct tcp_pcb *pcb, 
uint16_t len return ERR_MEM; } - queue_mutex_guard guard; + assert(client->_sent_pending == 0); + client->_sent_pending = len; _send_async_event(e); return ERR_OK; } From c4a3de0ec7e019c3ea96e3774fb0e7ea5262b330 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Mon, 24 Feb 2025 21:21:52 -0500 Subject: [PATCH 30/34] Remove guard test It's redundant in this design; we block forever --- src/AsyncTCP.cpp | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 7f252d5..2e5b8f3 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -286,25 +286,22 @@ inline lwip_tcp_event_packet_t *AsyncClient_detail::invalidate_pcb(AsyncClient & inline lwip_tcp_event_packet_t *AsyncClient_detail::get_async_event() { queue_mutex_guard guard; - lwip_tcp_event_packet_t *e = nullptr; - if (guard) { - e = _async_queue.pop_front(); - // special case: override values - if (e) { - switch (e->event) { - case LWIP_TCP_RECV: - if ((e->recv.err == 0) && (e->recv.pb == nullptr)) { - e->recv.pb = e->client->_recv_pending; - e->client->_recv_pending = nullptr; - } - break; - case LWIP_TCP_SENT: - e->sent.len = e->client->_sent_pending; - e->client->_sent_pending = 0; - break; - case LWIP_TCP_POLL: e->client->_polls_pending = 0; break; - default: break; - } + lwip_tcp_event_packet_t *e = _async_queue.pop_front(); + // special case: override values + if (e) { + switch (e->event) { + case LWIP_TCP_RECV: + if ((e->recv.err == 0) && (e->recv.pb == nullptr)) { + e->recv.pb = e->client->_recv_pending; + e->client->_recv_pending = nullptr; + } + break; + case LWIP_TCP_SENT: + e->sent.len = e->client->_sent_pending; + e->client->_sent_pending = 0; + break; + case LWIP_TCP_POLL: e->client->_polls_pending = 0; break; + default: break; } } DEBUG_PRINTF("0x%08x", (intptr_t)e); From 8c1a823796e77efe1108202f26d306fb74fa2cba Mon Sep 17 00:00:00 2001 From: Will Miles Date: Mon, 24 Feb 2025 21:25:16 -0500 Subject: [PATCH 31/34] Add 
CONFIG_ASYNC_TCP_QUEUE_LWIP_LOCK Add a feature to use the LwIP lock for the queue lock. This reduces the load on the LwIP thread, but might have the async thread wait longer. --- src/AsyncTCP.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 2e5b8f3..79e9dd6 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -176,6 +176,9 @@ static simple_intrusive_list _async_queue; static TaskHandle_t _async_service_task_handle = NULL; namespace { +#if defined(CONFIG_LWIP_TCPIP_CORE_LOCKING) && defined(CONFIG_ASYNC_TCP_QUEUE_LWIP_LOCK) +typedef tcp_core_guard queue_mutex_guard; +#else class queue_mutex_guard { bool holds_mutex; @@ -190,6 +193,7 @@ class queue_mutex_guard { return holds_mutex; }; }; +#endif } // namespace static inline bool _init_async_event_queue() { @@ -286,7 +290,7 @@ inline lwip_tcp_event_packet_t *AsyncClient_detail::invalidate_pcb(AsyncClient & inline lwip_tcp_event_packet_t *AsyncClient_detail::get_async_event() { queue_mutex_guard guard; - lwip_tcp_event_packet_t *e = _async_queue.pop_front(); + lwip_tcp_event_packet_t *e = _async_queue.pop_front(); // special case: override values if (e) { switch (e->event) { From 152fb19a2e2c51e06f0fb2d73f22a1454d87f0a9 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Mon, 24 Feb 2025 21:28:48 -0500 Subject: [PATCH 32/34] Embed global mutex in queue_mutex_guard Saves a little space when CONFIG_ASYNC_TCP_QUEUE_LWIP_LOCK is set --- src/AsyncTCP.cpp | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 79e9dd6..f4bb2df 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -171,7 +171,6 @@ static void _free_event(lwip_tcp_event_packet_t *evpkt) { } // Global variables -static SemaphoreHandle_t _async_queue_mutex = nullptr; static simple_intrusive_list _async_queue; static TaskHandle_t _async_service_task_handle = NULL; @@ -180,13 +179,22 @@ namespace { typedef tcp_core_guard 
queue_mutex_guard; #else class queue_mutex_guard { + + // Create-on-first-use idiom for an embedded mutex + static SemaphoreHandle_t _async_queue_mutex() { + + static SemaphoreHandle_t mutex = xSemaphoreCreateMutex(); + assert(mutex != nullptr); + return mutex; + }; + bool holds_mutex; public: - inline queue_mutex_guard() : holds_mutex(xSemaphoreTake(_async_queue_mutex, portMAX_DELAY)){}; + inline queue_mutex_guard() : holds_mutex(xSemaphoreTake(_async_queue_mutex(), portMAX_DELAY)){}; inline ~queue_mutex_guard() { if (holds_mutex) { - xSemaphoreGive(_async_queue_mutex); + xSemaphoreGive(_async_queue_mutex()); } }; inline explicit operator bool() const { @@ -197,12 +205,6 @@ class queue_mutex_guard { } // namespace static inline bool _init_async_event_queue() { - if (!_async_queue_mutex) { - _async_queue_mutex = xSemaphoreCreateMutex(); - if (!_async_queue_mutex) { - return false; - } - } return true; } From 91726a6975f667c10307a163d95d31c55d0be6e9 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Mon, 10 Mar 2025 00:46:44 -0400 Subject: [PATCH 33/34] Alternate approach to recv coalesce Instead of pointing to the chain, point to the event. Fixes handling of cases with nonzero error events mid-sequence. This requires clarifying the _remove_events/invalidate_pcb. 
--- src/AsyncTCP.cpp | 79 +++++++++++++++++++++++++++--------------------- src/AsyncTCP.h | 4 ++- 2 files changed, 48 insertions(+), 35 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 8c6bd13..8ac45fd 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -285,21 +285,26 @@ inline lwip_tcp_event_packet_t *AsyncClient_detail::invalidate_pcb(AsyncClient & _teardown_pcb(client._pcb); client._pcb = nullptr; client._end_event = nullptr; + _remove_events_for(&client); +#ifdef CONFIG_ASYNC_TCP_COALESCE_RECV + client._recv_pending = nullptr; // killed by _remove_events_for +#endif return end_event; }; inline lwip_tcp_event_packet_t *AsyncClient_detail::get_async_event() { queue_mutex_guard guard; lwip_tcp_event_packet_t *e = _async_queue.pop_front(); - // special case: override values + // special case: override values while holding the lock if (e) { switch (e->event) { +#ifdef CONFIG_ASYNC_TCP_COALESCE_RECV case LWIP_TCP_RECV: - if ((e->recv.err == 0) && (e->recv.pb == nullptr)) { - e->recv.pb = e->client->_recv_pending; + if (e->client->_recv_pending == e) { e->client->_recv_pending = nullptr; } break; +#endif case LWIP_TCP_SENT: e->sent.len = e->client->_sent_pending; e->client->_sent_pending = 0; @@ -473,12 +478,12 @@ int8_t AsyncClient_detail::tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf queue_mutex_guard guard; // Coalesce event, if possible - if (err == 0) { - if (client->_recv_pending) { - pbuf_cat(client->_recv_pending, pb); - return ERR_OK; - } +#ifdef CONFIG_ASYNC_TCP_COALESCE_RECV + if ((err == 0) && (client->_recv_pending)) { + pbuf_cat(client->_recv_pending->pb, pb); + return ERR_OK; } +#endif lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_RECV, client, pcb); if (e == nullptr) { @@ -486,13 +491,17 @@ int8_t AsyncClient_detail::tcp_recv(void *arg, struct tcp_pcb *pcb, struct pbuf return ERR_MEM; // TODO - error handling??? 
} + e->recv.pb = pb; + e->recv.err = err; +#ifdef CONFIG_ASYNC_TCP_COALESCE_RECV if (err == 0) { - client->_recv_pending = pb; - e->recv.pb = nullptr; + client->_recv_pending = e; } else { - e->recv.pb = pb; + // if we had one, we can no longer extend it + client->_recv_pending = nullptr; } - e->recv.err = err; +#endif + _send_async_event(e); } else { DEBUG_PRINTF("+F: 0x%08x -> 0x%08x", pcb, arg); @@ -537,11 +546,10 @@ void AsyncClient_detail::tcp_error(void *arg, int8_t err) { assert(client); // The associated pcb is now invalid and will soon be deallocated // We call on the preallocated end event from the client object + queue_mutex_guard guard; lwip_tcp_event_packet_t *e = AsyncClient_detail::invalidate_pcb(*client); if (e) { e->error.err = err; - queue_mutex_guard guard; - _remove_events_for(client); // FUTURE: we could hold the lock the whole time _prepend_async_event(e); } else { log_e("tcp_error call for client %X with no end event", (intptr_t)client); @@ -647,26 +655,28 @@ static err_t _tcp_recved_api(struct tcpip_api_call_data *api_call_msg) { return msg->err; } -// Sets *pcb_ptr to nullptr +// Unlike LwIP's tcp_close, we don't permit failure +// If the pcb can't be closed cleanly, we abort it. 
static err_t _tcp_close_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; msg->err = ERR_CONN; - if (msg->client) { - // Client has requested close; purge all events from queue - queue_mutex_guard guard; - _remove_events_for(msg->client); - } if (pcb_is_active(*msg)) { - _teardown_pcb(*msg->pcb_ptr); - msg->err = tcp_close(*msg->pcb_ptr); + auto pcb = *msg->pcb_ptr; + if (msg->client) { + // Client has requested close; complete teardown + queue_mutex_guard guard; + _free_event(AsyncClient_detail::invalidate_pcb(*msg->client)); + } + msg->err = tcp_close(pcb); if (msg->err != ERR_OK) { - tcp_abort(*msg->pcb_ptr); + tcp_abort(pcb); } *msg->pcb_ptr = nullptr; } return msg->err; } +// Sets *pcb_ptr to nullptr static esp_err_t _tcp_close(tcp_pcb **pcb_ptr, AsyncClient *client) { if (!pcb_ptr || !*pcb_ptr) { return ERR_CONN; @@ -678,7 +688,6 @@ static esp_err_t _tcp_close(tcp_pcb **pcb_ptr, AsyncClient *client) { return msg.err; } -// Sets *pcb_ptr to nullptr static err_t _tcp_abort_api(struct tcpip_api_call_data *api_call_msg) { tcp_api_call_t *msg = (tcp_api_call_t *)api_call_msg; msg->err = ERR_CONN; @@ -694,6 +703,7 @@ static err_t _tcp_abort_api(struct tcpip_api_call_data *api_call_msg) { return msg->err; } +// Sets *pcb_ptr to nullptr static esp_err_t _tcp_abort(tcp_pcb **pcb_ptr, AsyncClient *client) { if (!pcb_ptr || !*pcb_ptr) { return ERR_CONN; @@ -753,9 +763,12 @@ static tcp_pcb *_tcp_listen_with_backlog(tcp_pcb *pcb, uint8_t backlog) { */ AsyncClient::AsyncClient(tcp_pcb *pcb) - : _pcb(pcb), _end_event(nullptr), _needs_discard(pcb != nullptr), _polls_pending(0), _recv_pending(nullptr), _sent_pending(0), _connect_cb(0), - _connect_cb_arg(0), _discard_cb(0), _discard_cb_arg(0), _sent_cb(0), _sent_cb_arg(0), _error_cb(0), _error_cb_arg(0), _recv_cb(0), _recv_cb_arg(0), - _pb_cb(0), _pb_cb_arg(0), _timeout_cb(0), _timeout_cb_arg(0), _ack_pcb(true), _tx_last_packet(0), _rx_timeout(0), _rx_last_ack(0), + : 
_pcb(pcb), _end_event(nullptr), _needs_discard(pcb != nullptr), _polls_pending(0), _sent_pending(0), +#ifdef CONFIG_ASYNC_TCP_COALESCE_RECV + _recv_pending(nullptr), +#endif + _connect_cb(0), _connect_cb_arg(0), _discard_cb(0), _discard_cb_arg(0), _sent_cb(0), _sent_cb_arg(0), _error_cb(0), _error_cb_arg(0), _recv_cb(0), + _recv_cb_arg(0), _pb_cb(0), _pb_cb_arg(0), _timeout_cb(0), _timeout_cb_arg(0), _ack_pcb(true), _tx_last_packet(0), _rx_timeout(0), _rx_last_ack(0), _ack_timeout(CONFIG_ASYNC_TCP_MAX_ACK_TIME), _connect_port(0) { if (_pcb) { tcp_core_guard tg; @@ -763,7 +776,7 @@ AsyncClient::AsyncClient(tcp_pcb *pcb) _rx_last_packet = millis(); if (!_end_event) { // Out of memory!! - log_e("Unable to allocate event"); + log_e("Unable to allocate error event"); // Swallow this PCB, producing a null client object tcp_abort(_pcb); _pcb = nullptr; @@ -778,12 +791,10 @@ AsyncClient::~AsyncClient() { if (_pcb) { _close(); } - if (_end_event) { - _free_event(_end_event); - } - if (_recv_pending) { - pbuf_free(_recv_pending); - } + assert(!_end_event); // should have been cleaned up by _close +#ifdef CONFIG_ASYNC_TCP_COALESCE_RECV + assert(!_recv_pending); +#endif assert(_needs_discard == false); // If we needed the discard callback, it must have been called by now // We take care to clear this flag before calling the discard callback // as the discard callback commonly destructs the AsyncClient object. 
diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index a26990b..2954deb 100644 --- a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -251,8 +251,10 @@ class AsyncClient { lwip_tcp_event_packet_t *_end_event; bool _needs_discard; unsigned _polls_pending; - struct pbuf *_recv_pending; uint16_t _sent_pending; +#ifdef CONFIG_ASYNC_TCP_COALESCE_RECV + struct lwip_tcp_event_packet_t *_recv_pending; +#endif AcConnectHandler _connect_cb; void *_connect_cb_arg; From ea2c3e424e0760f9d5142c59e5484fc67bd76067 Mon Sep 17 00:00:00 2001 From: Will Miles Date: Mon, 10 Mar 2025 01:05:55 -0400 Subject: [PATCH 34/34] Use atomics instead of locking for sent+poll --- src/AsyncTCP.cpp | 41 +++++++++++------------------------------ src/AsyncTCP.h | 5 +++-- 2 files changed, 14 insertions(+), 32 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 8ac45fd..c4726d3 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -96,9 +96,6 @@ struct lwip_tcp_event_packet_t { struct { int8_t err; } error; - struct { - uint16_t len; - } sent; struct { pbuf *pb; int8_t err; @@ -296,23 +293,11 @@ inline lwip_tcp_event_packet_t *AsyncClient_detail::get_async_event() { queue_mutex_guard guard; lwip_tcp_event_packet_t *e = _async_queue.pop_front(); // special case: override values while holding the lock - if (e) { switch (e->event) { #ifdef CONFIG_ASYNC_TCP_COALESCE_RECV - case LWIP_TCP_RECV: - if (e->client->_recv_pending == e) { - e->client->_recv_pending = nullptr; - } - break; -#endif - case LWIP_TCP_SENT: - e->sent.len = e->client->_sent_pending; - e->client->_sent_pending = 0; - break; - case LWIP_TCP_POLL: e->client->_polls_pending = 0; break; - default: break; - } + if (e && (e->event == LWIP_TCP_RECV) && (e->client->_recv_pending == e)) { e->client->_recv_pending = nullptr; } +#endif DEBUG_PRINTF("0x%08x", (intptr_t)e); return e; } @@ -351,9 +336,11 @@ void AsyncClient_detail::handle_async_event(lwip_tcp_event_packet_t *e) { e->client->_fin(e->fin.err); } else if (e->event == LWIP_TCP_SENT)
{ DEBUG_PRINTF("-S: 0x%08x", e->client->_pcb); - e->client->_sent(e->sent.len); + auto sent = e->client->_sent_pending.exchange(0); + e->client->_sent(sent); } else if (e->event == LWIP_TCP_POLL) { DEBUG_PRINTF("-P: 0x%08x", e->client->_pcb); + e->client->_polls_pending.store(0); e->client->_poll(); } else if (e->event == LWIP_TCP_CONNECTED) { DEBUG_PRINTF("-C: 0x%08x 0x%08x %d", e->client, e->client->_pcb, e->connected.err); @@ -454,9 +441,7 @@ int8_t AsyncClient_detail::tcp_poll(void *arg, struct tcp_pcb *pcb) { AsyncClient *client = reinterpret_cast<AsyncClient *>(arg); // Coalesce event, if possible - queue_mutex_guard guard; - if (client->_polls_pending) { - ++client->_polls_pending; + if (client->_polls_pending++ != 0) { return ERR_OK; } @@ -465,7 +450,7 @@ return ERR_MEM; } - assert(client->_polls_pending == 0); + queue_mutex_guard guard; _send_async_event(e); return ERR_OK; } @@ -523,19 +508,15 @@ int8_t AsyncClient_detail::tcp_sent(void *arg, struct tcp_pcb *pcb, uint16_t len AsyncClient *client = reinterpret_cast<AsyncClient *>(arg); // Coalesce event, if possible - queue_mutex_guard guard; - if (client->_sent_pending) { - client->_sent_pending += len; - return ERR_OK; - } + auto _prev_pending = client->_sent_pending.fetch_add(len); + if (_prev_pending) return ERR_OK; lwip_tcp_event_packet_t *e = _alloc_event(LWIP_TCP_SENT, client, pcb); if (e == nullptr) { return ERR_MEM; } - assert(client->_sent_pending == 0); - client->_sent_pending = len; + queue_mutex_guard guard; _send_async_event(e); return ERR_OK; } diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index 2954deb..3602ff5 100644 --- a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -13,6 +13,7 @@ #endif #include "lwip/ip6_addr.h" #include "lwip/ip_addr.h" +#include <atomic> #include <functional> #ifndef LIBRETINY @@ -250,8 +251,8 @@ class AsyncClient { tcp_pcb *_pcb; lwip_tcp_event_packet_t *_end_event; bool _needs_discard; - unsigned _polls_pending; - uint16_t _sent_pending; + std::atomic<unsigned>
_polls_pending; + std::atomic<uint16_t> _sent_pending; #ifdef CONFIG_ASYNC_TCP_COALESCE_RECV struct lwip_tcp_event_packet_t *_recv_pending; #endif