From 0e033d923baebee128eada004a5035edaac725a6 Mon Sep 17 00:00:00 2001
From: Damian Krolik
Date: Thu, 31 Oct 2024 08:58:09 +0100
Subject: [PATCH 1/2] nrf_rpc: refactor internal tasks

1. Rename internal_data to internal_task to better describe the purpose
   of the structure.
2. Use internal_task_consumed event to notify about freeing the global
   task object for all types of tasks.
3. Use union for different types of tasks to not bloat the internal
   task structure when more tasks or fields are added.

Signed-off-by: Damian Krolik
---
 nrf_rpc/nrf_rpc.c | 79 +++++++++++++++++++++++++++--------------------
 1 file changed, 46 insertions(+), 33 deletions(-)

diff --git a/nrf_rpc/nrf_rpc.c b/nrf_rpc/nrf_rpc.c
index 628ee0717c..f0d1a380d2 100644
--- a/nrf_rpc/nrf_rpc.c
+++ b/nrf_rpc/nrf_rpc.c
@@ -85,24 +85,32 @@ struct init_packet_data {
 	const char *strid;  /* Remote group unique identifier. */
 };
 
-enum internal_pool_data_type {
-	NRF_RPC_INITIALIZATION,
-	NRF_RPC_ERROR
+enum internal_task_type {
+	NRF_RPC_TASK_GROUP_INIT,
+	NRF_RPC_TASK_RECV_ERROR
 };
 
-struct internal_pool_data {
-	enum internal_pool_data_type type;
-	const struct nrf_rpc_group *group;
-	int err;
-	uint8_t hdr_id;
-	uint8_t hdr_type;
+struct internal_task {
+	enum internal_task_type type;
+
+	union {
+		struct {
+			const struct nrf_rpc_group *group;
+		} group_init;
+
+		struct {
+			const struct nrf_rpc_group *group;
+			int err;
+			uint8_t hdr_id;
+			uint8_t hdr_type;
+		} recv_error;
+	};
 };
 
 /* Pool of statically allocated command contexts. */
 static struct nrf_rpc_cmd_ctx cmd_ctx_pool[CONFIG_NRF_RPC_CMD_CTX_POOL_SIZE];
 
 static struct nrf_rpc_os_event groups_init_event;
-static struct nrf_rpc_os_event error_event;
 
 /* Number of groups */
 static uint8_t group_count;
@@ -116,7 +124,8 @@ static bool is_initialized;
 /* Error handler provided to the init function. */
 static nrf_rpc_err_handler_t global_err_handler;
 
-static struct internal_pool_data internal_data;
+static struct internal_task internal_task;
+static struct nrf_rpc_os_event internal_task_consumed;
 
 /* Array with all defiend groups */
 NRF_RPC_AUTO_ARR(nrf_rpc_groups_array, "grp");
@@ -361,19 +370,23 @@ static inline bool packet_validate(const uint8_t *packet)
 
 static void internal_tx_handler(void)
 {
-	struct internal_pool_data copy = internal_data;
+	struct internal_task task = internal_task;
+	nrf_rpc_os_event_set(&internal_task_consumed);
+
+	switch (task.type) {
+	case NRF_RPC_TASK_GROUP_INIT: {
+		const struct nrf_rpc_group *group = task.group_init.group;
 
-	if (copy.type == NRF_RPC_INITIALIZATION) {
-		nrf_rpc_os_event_set(&copy.group->data->decode_done_event);
-		if (group_init_send(copy.group)) {
+		if (group_init_send(group)) {
 			NRF_RPC_ERR("Failed to send group init packet for group id: %d strid: %s",
-				    copy.group->data->src_group_id, copy.group->strid);
+				    group->data->src_group_id, group->strid);
 		}
+		break;
 	}
-
-	if (copy.type == NRF_RPC_ERROR) {
-		nrf_rpc_os_event_set(&error_event);
-		nrf_rpc_err(copy.err, NRF_RPC_ERR_SRC_RECV, copy.group, copy.hdr_id, copy.hdr_type);
+	case NRF_RPC_TASK_RECV_ERROR:
+		nrf_rpc_err(task.recv_error.err, NRF_RPC_ERR_SRC_RECV, task.recv_error.group,
+			    task.recv_error.hdr_id, task.recv_error.hdr_type);
+		break;
 	}
 }
 
@@ -547,7 +560,7 @@ static uint8_t parse_incoming_packet(struct nrf_rpc_cmd_ctx *cmd_ctx,
 /* Thread pool callback */
 static void execute_packet(const uint8_t *packet, size_t len)
 {
-	if (packet == (const uint8_t *)&internal_data) {
+	if (packet == (const uint8_t *)&internal_task) {
 		internal_tx_handler();
 	} else {
 		parse_incoming_packet(NULL, packet, len);
@@ -674,10 +687,10 @@ static int init_packet_handle(struct header *hdr, const struct nrf_rpc_group **g
 		 * If remote processor does not know our group id, send an init packet back,
 		 * since it might have missed our original init packet.
 		 */
-		internal_data.type = NRF_RPC_INITIALIZATION;
-		internal_data.group = *group;
-		nrf_rpc_os_thread_pool_send((const uint8_t *)&internal_data, sizeof(internal_data));
-		nrf_rpc_os_event_wait(&(*group)->data->decode_done_event, NRF_RPC_OS_WAIT_FOREVER);
+		internal_task.type = NRF_RPC_TASK_GROUP_INIT;
+		internal_task.group_init.group = *group;
+		nrf_rpc_os_thread_pool_send((const uint8_t *)&internal_task, sizeof(internal_task));
+		nrf_rpc_os_event_wait(&internal_task_consumed, NRF_RPC_OS_WAIT_FOREVER);
 	}
 	return 0;
 }
@@ -802,13 +815,13 @@ static void receive_handler(const struct nrf_rpc_tr *transport, const uint8_t *p
 	}
 
 	if (err < 0) {
-		internal_data.type = NRF_RPC_ERROR;
-		internal_data.group = group;
-		internal_data.err = err;
-		internal_data.hdr_id = hdr.id;
-		internal_data.hdr_type = hdr.type;
-		nrf_rpc_os_thread_pool_send((const uint8_t *)&internal_data, sizeof(internal_data));
-		nrf_rpc_os_event_wait(&error_event, NRF_RPC_OS_WAIT_FOREVER);
+		internal_task.type = NRF_RPC_TASK_RECV_ERROR;
+		internal_task.recv_error.group = group;
+		internal_task.recv_error.err = err;
+		internal_task.recv_error.hdr_type = hdr.type;
+		internal_task.recv_error.hdr_id = hdr.id;
+		nrf_rpc_os_thread_pool_send((const uint8_t *)&internal_task, sizeof(internal_task));
+		nrf_rpc_os_event_wait(&internal_task_consumed, NRF_RPC_OS_WAIT_FOREVER);
 	}
 }
 
@@ -1083,7 +1096,7 @@ int nrf_rpc_init(nrf_rpc_err_handler_t err_handler)
 		return err;
 	}
 
-	err = nrf_rpc_os_event_init(&error_event);
+	err = nrf_rpc_os_event_init(&internal_task_consumed);
 	if (err < 0) {
 		return err;
 	}

From c3845fa90ec6fed3dc5b1803c25450bd304e24b5 Mon Sep 17 00:00:00 2001
From: Damian Krolik
Date: Thu, 31 Oct 2024 10:10:37 +0100
Subject: [PATCH 2/2] nrf_rpc: call bound_handler after init reply

1. Ensure that the group's bound_handler is called after sending the
   init reply. This is to make sure that the remote knows our group ID
   and is able to process incoming commands when the handler is called.
2. Similarly, signal groups_init_event after sending the init reply.

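With both changes, the NRF_RPC_TASK_GROUP_INIT task processed by the
thread pool performs its steps roughly in the following order (a sketch
of internal_tx_handler() after this patch; error logging omitted):

	if (task.group_init.send_reply) {
		group_init_send(group);            /* remote learns our group ID first */
	}

	if (group->bound_handler != NULL) {
		group->bound_handler(group);       /* application is notified afterwards */
	}

	if (task.group_init.signal_groups_init_event) {
		nrf_rpc_os_event_set(&groups_init_event);  /* unblock nrf_rpc_init() last */
	}
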
Signed-off-by: Damian Krolik
---
 nrf_rpc/nrf_rpc.c | 56 +++++++++++++++++++++++++++++++----------------
 1 file changed, 37 insertions(+), 19 deletions(-)

diff --git a/nrf_rpc/nrf_rpc.c b/nrf_rpc/nrf_rpc.c
index f0d1a380d2..23db8d8c8d 100644
--- a/nrf_rpc/nrf_rpc.c
+++ b/nrf_rpc/nrf_rpc.c
@@ -96,6 +96,8 @@ struct internal_task {
 	union {
 		struct {
 			const struct nrf_rpc_group *group;
+			bool send_reply;
+			bool signal_groups_init_event;
 		} group_init;
 
 		struct {
@@ -377,10 +379,19 @@ static void internal_tx_handler(void)
 	case NRF_RPC_TASK_GROUP_INIT: {
 		const struct nrf_rpc_group *group = task.group_init.group;
 
-		if (group_init_send(group)) {
+		if (task.group_init.send_reply && group_init_send(group)) {
 			NRF_RPC_ERR("Failed to send group init packet for group id: %d strid: %s",
 				    group->data->src_group_id, group->strid);
 		}
+
+		if (group->bound_handler != NULL) {
+			group->bound_handler(group);
+		}
+
+		if (task.group_init.signal_groups_init_event) {
+			nrf_rpc_os_event_set(&groups_init_event);
+		}
+
 		break;
 	}
 	case NRF_RPC_TASK_RECV_ERROR:
@@ -633,7 +644,8 @@ static int init_packet_handle(struct header *hdr, const struct nrf_rpc_group **g
 	struct init_packet_data init_data = {0};
 	struct nrf_rpc_group_data *group_data;
 	bool first_init;
-	bool wait_on_init;
+	bool signal_groups_init_event = false;
+	bool send_reply;
 
 	*group = NULL;
 
@@ -663,32 +675,38 @@ static int init_packet_handle(struct header *hdr, const struct nrf_rpc_group **g
 	group_data = (*group)->data;
 	first_init = group_data->dst_group_id == NRF_RPC_ID_UNKNOWN;
 	group_data->dst_group_id = hdr->src_group_id;
-	wait_on_init = (*group)->flags & NRF_RPC_FLAGS_WAIT_ON_INIT;
-	nrf_rpc_group_bound_handler_t bound_handler = (*group)->bound_handler;
 
 	NRF_RPC_DBG("Found corresponding local group. Remote id: %d, Local id: %d",
-		    hdr->src_group_id, group_data->src_group_id);
-
-	if (bound_handler != NULL) {
-		bound_handler(*group);
-	}
+		    group_data->dst_group_id, group_data->src_group_id);
 
-	if (first_init && wait_on_init) {
+	if (first_init && (*group)->flags & NRF_RPC_FLAGS_WAIT_ON_INIT) {
 		++initialized_group_count;
-		if (initialized_group_count == waiting_group_count) {
-			/* All groups are initialized. */
-			nrf_rpc_os_event_set(&groups_init_event);
-		}
-	}
-
-	if (hdr->dst_group_id == NRF_RPC_ID_UNKNOWN && (*group)->data->transport_initialized) {
 		/*
-		 * If remote processor does not know our group id, send an init packet back,
-		 * since it might have missed our original init packet.
+		 * If this is the last group that nrf_rpc_init() is waiting for, use the async task
+		 * to signal the corresponding event and unblock the waiting thread.
 		 */
+		signal_groups_init_event = (initialized_group_count == waiting_group_count);
+	}
+
+	/*
+	 * If the remote processor does not know our group id, send an init packet back as
+	 * either we are not an initiator, which is indicated by NRF_RPC_FLAGS_INITIATOR
+	 * flag, or the remote has missed our init packet.
+	 */
+	send_reply = (hdr->dst_group_id == NRF_RPC_ID_UNKNOWN) && group_data->transport_initialized;
+
+	/*
+	 * Spawn the async task only if necessary. The async task is used to avoid sending the init
+	 * reply in the transport receive thread. The application is also notified about the group
+	 * initialization from within the task to ensure that when this happens the init reply has
+	 * already been sent and the remote is ready to receive nRF RPC commands.
+	 */
+	if (((*group)->bound_handler != NULL) || send_reply || signal_groups_init_event) {
 		internal_task.type = NRF_RPC_TASK_GROUP_INIT;
 		internal_task.group_init.group = *group;
+		internal_task.group_init.send_reply = send_reply;
+		internal_task.group_init.signal_groups_init_event = signal_groups_init_event;
 		nrf_rpc_os_thread_pool_send((const uint8_t *)&internal_task, sizeof(internal_task));
 		nrf_rpc_os_event_wait(&internal_task_consumed, NRF_RPC_OS_WAIT_FOREVER);
 	}
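
Note on usage (illustrative only, not part of the patches): with PATCH 2/2
applied, a group's bound_handler runs from the nRF RPC thread pool only
after the init reply has been sent, so the remote side already knows the
local group ID when the callback fires. A minimal application-side handler
could therefore look as follows; the handler and flag names are hypothetical,
and the handler is assumed to be registered as the group's bound_handler
when the group is defined:

	#include <nrf_rpc.h>

	static bool remote_group_ready;

	/* Matches nrf_rpc_group_bound_handler_t: called once the group is bound. */
	static void app_group_bound(const struct nrf_rpc_group *group)
	{
		/* At this point the init reply has already been sent (see
		 * internal_tx_handler() above), so any command the application
		 * issues from now on can be matched to our group by the remote.
		 */
		remote_group_ready = true;
	}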