Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

nrf_rpc: call bound_handler after init reply #1550

Merged
merged 2 commits into from
Nov 18, 2024
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
133 changes: 82 additions & 51 deletions nrf_rpc/nrf_rpc.c
Original file line number Diff line number Diff line change
Expand Up @@ -85,24 +85,34 @@ struct init_packet_data {
const char *strid; /* Remote group unique identifier. */
};

enum internal_pool_data_type {
NRF_RPC_INITIALIZATION,
NRF_RPC_ERROR
enum internal_task_type {
NRF_RPC_TASK_GROUP_INIT,
NRF_RPC_TASK_RECV_ERROR
};

struct internal_pool_data {
enum internal_pool_data_type type;
const struct nrf_rpc_group *group;
int err;
uint8_t hdr_id;
uint8_t hdr_type;
struct internal_task {
enum internal_task_type type;

union {
struct {
const struct nrf_rpc_group *group;
bool send_reply;
bool signal_groups_init_event;
} group_init;

struct {
const struct nrf_rpc_group *group;
int err;
uint8_t hdr_id;
uint8_t hdr_type;
} recv_error;
};
};

/* Pool of statically allocated command contexts. */
static struct nrf_rpc_cmd_ctx cmd_ctx_pool[CONFIG_NRF_RPC_CMD_CTX_POOL_SIZE];

static struct nrf_rpc_os_event groups_init_event;
static struct nrf_rpc_os_event error_event;

/* Number of groups */
static uint8_t group_count;
Expand All @@ -116,7 +126,8 @@ static bool is_initialized;
/* Error handler provided to the init function. */
static nrf_rpc_err_handler_t global_err_handler;

static struct internal_pool_data internal_data;
static struct internal_task internal_task;
static struct nrf_rpc_os_event internal_task_consumed;

/* Array with all defined groups */
NRF_RPC_AUTO_ARR(nrf_rpc_groups_array, "grp");
Expand Down Expand Up @@ -361,19 +372,32 @@ static inline bool packet_validate(const uint8_t *packet)

static void internal_tx_handler(void)
{
struct internal_pool_data copy = internal_data;
struct internal_task task = internal_task;
nrf_rpc_os_event_set(&internal_task_consumed);

switch (task.type) {
case NRF_RPC_TASK_GROUP_INIT: {
const struct nrf_rpc_group *group = task.group_init.group;

if (copy.type == NRF_RPC_INITIALIZATION) {
nrf_rpc_os_event_set(&copy.group->data->decode_done_event);
if (group_init_send(copy.group)) {
if (task.group_init.send_reply && group_init_send(group)) {
NRF_RPC_ERR("Failed to send group init packet for group id: %d strid: %s",
copy.group->data->src_group_id, copy.group->strid);
group->data->src_group_id, group->strid);
}

if (group->bound_handler != NULL) {
group->bound_handler(group);
}
}

if (copy.type == NRF_RPC_ERROR) {
nrf_rpc_os_event_set(&error_event);
nrf_rpc_err(copy.err, NRF_RPC_ERR_SRC_RECV, copy.group, copy.hdr_id, copy.hdr_type);
if (task.group_init.signal_groups_init_event) {
nrf_rpc_os_event_set(&groups_init_event);
}

break;
}
case NRF_RPC_TASK_RECV_ERROR:
nrf_rpc_err(task.recv_error.err, NRF_RPC_ERR_SRC_RECV, task.recv_error.group,
task.recv_error.hdr_id, task.recv_error.hdr_type);
break;
}
}

Expand Down Expand Up @@ -547,7 +571,7 @@ static uint8_t parse_incoming_packet(struct nrf_rpc_cmd_ctx *cmd_ctx,
/* Thread pool callback */
static void execute_packet(const uint8_t *packet, size_t len)
{
if (packet == (const uint8_t *)&internal_data) {
if (packet == (const uint8_t *)&internal_task) {
internal_tx_handler();
} else {
parse_incoming_packet(NULL, packet, len);
Expand Down Expand Up @@ -620,7 +644,8 @@ static int init_packet_handle(struct header *hdr, const struct nrf_rpc_group **g
struct init_packet_data init_data = {0};
struct nrf_rpc_group_data *group_data;
bool first_init;
bool wait_on_init;
bool signal_groups_init_event = false;
bool send_reply;

*group = NULL;

Expand Down Expand Up @@ -650,34 +675,40 @@ static int init_packet_handle(struct header *hdr, const struct nrf_rpc_group **g
group_data = (*group)->data;
first_init = group_data->dst_group_id == NRF_RPC_ID_UNKNOWN;
group_data->dst_group_id = hdr->src_group_id;
wait_on_init = (*group)->flags & NRF_RPC_FLAGS_WAIT_ON_INIT;
nrf_rpc_group_bound_handler_t bound_handler = (*group)->bound_handler;

NRF_RPC_DBG("Found corresponding local group. Remote id: %d, Local id: %d",
hdr->src_group_id, group_data->src_group_id);
group_data->dst_group_id, group_data->src_group_id);

if (bound_handler != NULL) {
bound_handler(*group);
}

if (first_init && wait_on_init) {
if (first_init && (*group)->flags & NRF_RPC_FLAGS_WAIT_ON_INIT) {
++initialized_group_count;

if (initialized_group_count == waiting_group_count) {
/* All groups are initialized. */
nrf_rpc_os_event_set(&groups_init_event);
}
}

if (hdr->dst_group_id == NRF_RPC_ID_UNKNOWN && (*group)->data->transport_initialized) {
/*
* If remote processor does not know our group id, send an init packet back,
* since it might have missed our original init packet.
* If this is the last group that nrf_rpc_init() is waiting for, use the async task
* to signal the corresponding event and unblock the waiting thread.
*/
internal_data.type = NRF_RPC_INITIALIZATION;
internal_data.group = *group;
nrf_rpc_os_thread_pool_send((const uint8_t *)&internal_data, sizeof(internal_data));
nrf_rpc_os_event_wait(&(*group)->data->decode_done_event, NRF_RPC_OS_WAIT_FOREVER);
signal_groups_init_event = (initialized_group_count == waiting_group_count);
}

/*
* If the remote processor does not know our group id, send an init packet back as
* either we are not an initiator, which is indicated by NRF_RPC_FLAGS_INITIATOR
* flag, or the remote has missed our init packet.
*/
send_reply = (hdr->dst_group_id == NRF_RPC_ID_UNKNOWN) && group_data->transport_initialized;

/*
* Spawn the async task only if necessary. The async task is used to avoid sending the init
* reply in the transport receive thread. The application is also notified about the group
* initialization from within the task to ensure that when this happens the init reply has
* already been sent and the remote is ready to receive nRF RPC commands.
*/
if (((*group)->bound_handler != NULL) || send_reply || signal_groups_init_event) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are you sure that this will not result in multiple bound_handler calls?

Copy link
Contributor Author

@Damian-Nordic Damian-Nordic Oct 31, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I am not, but previously it was the same, right? I mean, bound_handler was called for each incoming init packet, regardless of whether it was the first or the n-th packet.

internal_task.type = NRF_RPC_TASK_GROUP_INIT;
internal_task.group_init.group = *group;
internal_task.group_init.send_reply = send_reply;
internal_task.group_init.signal_groups_init_event = signal_groups_init_event;
nrf_rpc_os_thread_pool_send((const uint8_t *)&internal_task, sizeof(internal_task));
nrf_rpc_os_event_wait(&internal_task_consumed, NRF_RPC_OS_WAIT_FOREVER);
}

return 0;
Expand Down Expand Up @@ -802,13 +833,13 @@ static void receive_handler(const struct nrf_rpc_tr *transport, const uint8_t *p
}

if (err < 0) {
internal_data.type = NRF_RPC_ERROR;
internal_data.group = group;
internal_data.err = err;
internal_data.hdr_id = hdr.id;
internal_data.hdr_type = hdr.type;
nrf_rpc_os_thread_pool_send((const uint8_t *)&internal_data, sizeof(internal_data));
nrf_rpc_os_event_wait(&error_event, NRF_RPC_OS_WAIT_FOREVER);
internal_task.type = NRF_RPC_TASK_RECV_ERROR;
internal_task.recv_error.group = group;
internal_task.recv_error.err = err;
internal_task.recv_error.hdr_type = hdr.type;
internal_task.recv_error.hdr_id = hdr.id;
nrf_rpc_os_thread_pool_send((const uint8_t *)&internal_task, sizeof(internal_task));
nrf_rpc_os_event_wait(&internal_task_consumed, NRF_RPC_OS_WAIT_FOREVER);
}
}

Expand Down Expand Up @@ -1083,7 +1114,7 @@ int nrf_rpc_init(nrf_rpc_err_handler_t err_handler)
return err;
}

err = nrf_rpc_os_event_init(&error_event);
err = nrf_rpc_os_event_init(&internal_task_consumed);
if (err < 0) {
return err;
}
Expand Down
Loading