diff --git a/nrf_rpc/include/nrf_rpc.h b/nrf_rpc/include/nrf_rpc.h
index 2bba86fbc7..e52a2d484b 100644
--- a/nrf_rpc/include/nrf_rpc.h
+++ b/nrf_rpc/include/nrf_rpc.h
@@ -171,6 +171,20 @@ struct nrf_rpc_err_report {
 	enum nrf_rpc_packet_type packet_type;
 };
 
+/** @brief Cleanup procedure list element.
+ */
+struct nrf_rpc_cleanup_handler
+{
+	/** @brief Cleanup function invoked on a call to @ref nrf_rpc_stop. */
+	void (*handler)(void *context);
+
+	/** @brief Custom context passed as the handler parameter. */
+	void *context;
+
+	/** @brief Pointer to the next element in the list. Managed automatically by nrf_rpc. */
+	struct nrf_rpc_cleanup_handler *next;
+};
+
 /** @brief Internal macro for parametrizing nrf_rpc groups.
  *
  * @param _name Symbol name of the group.
@@ -357,6 +371,41 @@ void nrf_rpc_set_bound_handler(nrf_rpc_group_bound_handler_t bound_handler);
  */
 int nrf_rpc_init(nrf_rpc_err_handler_t err_handler);
 
+
+/** @brief Registers a cleanup handler.
+ *
+ * Calling this function adds the handler to the list. The handler is invoked automatically during
+ * @ref nrf_rpc_stop if the cleanup parameter is set to true.
+ * Use this API to clean up any custom state set up during RPC operations, for example registered callbacks.
+ *
+ * @param handler Pointer to the structure storing the cleanup handler. The structure must remain
+ *                valid even after this function returns.
+ *
+ */
+void nrf_rpc_register_cleanup_handler(struct nrf_rpc_cleanup_handler *handler);
+
+
+/** @brief Temporarily suspends all RPC communication.
+ *
+ * Once this function is called, all communication fails until @ref nrf_rpc_resume is called.
+ * All pending commands also fail immediately.
+ *
+ * @param cleanup Set to true to also invoke all the custom cleanup handlers.
+ *
+ * @note If the cleanup parameter is set to true, it may be necessary to reset the peer before
+ *       invoking @ref nrf_rpc_resume.
+ *
+ */
+void nrf_rpc_stop(bool cleanup);
+
+/** @brief Resumes RPC communication.
+ *
+ * Calling this function reverts the effects of @ref nrf_rpc_stop.
+ *
+ */
+void nrf_rpc_resume(void);
+
+
 /** @brief Send a command and provide callback to handle response.
  *
  * @param group Group that command belongs to.
diff --git a/nrf_rpc/nrf_rpc.c b/nrf_rpc/nrf_rpc.c
index 528781efc6..a5ca66cf3b 100644
--- a/nrf_rpc/nrf_rpc.c
+++ b/nrf_rpc/nrf_rpc.c
@@ -125,6 +125,7 @@ static uint8_t initialized_group_count;
 
 /* nRF RPC initialization status. */
 static bool is_initialized;
+static bool is_stopped;
 
 /* Error handler provided to the init function. */
 static nrf_rpc_err_handler_t global_err_handler;
@@ -135,6 +136,9 @@ static nrf_rpc_group_bound_handler_t global_bound_handler;
 static struct internal_task internal_task;
 static struct nrf_rpc_os_event internal_task_consumed;
 
+static struct nrf_rpc_os_mutex cleanup_mutex;
+static struct nrf_rpc_cleanup_handler *cleanup_handlers;
+
 /* Array with all defiend groups */
 NRF_RPC_AUTO_ARR(nrf_rpc_groups_array, "grp");
 
@@ -726,6 +730,19 @@ static int init_packet_handle(struct header *hdr, const struct nrf_rpc_group **g
 	return 0;
 }
 
+static void abort_all_ops(void)
+{
+	NRF_RPC_INF("Canceling all tasks.");
+	for (int i = 0; i < CONFIG_NRF_RPC_CMD_CTX_POOL_SIZE; i++) {
+		struct nrf_rpc_cmd_ctx *ctx = &cmd_ctx_pool[i];
+		nrf_rpc_os_mutex_lock(&ctx->mutex);
+		if (ctx->use_count > 0) {
+			nrf_rpc_os_msg_set(&ctx->recv_msg, NULL, 0);
+		}
+		nrf_rpc_os_mutex_unlock(&ctx->mutex);
+	}
+}
+
 /* Callback from transport layer that handles incoming. */
 static void receive_handler(const struct nrf_rpc_tr *transport, const uint8_t *packet, size_t len,
			    void *context)
@@ -742,6 +759,18 @@ static void receive_handler(const struct nrf_rpc_tr *transport, const uint8_t *p
 		goto cleanup_and_exit;
 	}
 
+	if (is_stopped &&
+	    (hdr.type == NRF_RPC_PACKET_TYPE_CMD ||
+	     hdr.type == NRF_RPC_PACKET_TYPE_EVT ||
+	     hdr.type == NRF_RPC_PACKET_TYPE_ACK ||
+	     hdr.type == NRF_RPC_PACKET_TYPE_RSP)) {
+		/* Drop only the selected packet types;
+		 * INIT and ERROR packets must still be handled.
+		 */
+		NRF_RPC_WRN("Dropping the packet.");
+		goto cleanup_and_exit;
+	}
+
 	if (hdr.type == NRF_RPC_PACKET_TYPE_CMD ||
 	    hdr.type == NRF_RPC_PACKET_TYPE_EVT ||
 	    hdr.type == NRF_RPC_PACKET_TYPE_ACK ||
@@ -974,33 +1003,41 @@ int nrf_rpc_cmd_common(const struct nrf_rpc_group *group, uint32_t cmd,
 		handler_data = ptr2;
 	}
 
-	cmd_ctx = cmd_ctx_reserve();
+	nrf_rpc_os_mutex_lock(&cleanup_mutex);
+	if (is_stopped) {
+		err = -NRF_EPERM;
+		nrf_rpc_os_mutex_unlock(&cleanup_mutex);
+	} else {
 
-	hdr.dst = cmd_ctx->remote_id;
-	hdr.src = cmd_ctx->id;
-	hdr.id = cmd & 0xFF;
-	hdr.src_group_id = group->data->src_group_id;
-	hdr.dst_group_id = group->data->dst_group_id;
-	header_cmd_encode(full_packet, &hdr);
+		cmd_ctx = cmd_ctx_reserve();
+		nrf_rpc_os_mutex_unlock(&cleanup_mutex); /* release the mutex; the context-specific one has been acquired */
 
-	old_handler = cmd_ctx->handler;
-	old_handler_data = cmd_ctx->handler_data;
-	cmd_ctx->handler = handler;
-	cmd_ctx->handler_data = handler_data;
+		hdr.dst = cmd_ctx->remote_id;
+		hdr.src = cmd_ctx->id;
+		hdr.id = cmd & 0xFF;
+		hdr.src_group_id = group->data->src_group_id;
+		hdr.dst_group_id = group->data->dst_group_id;
+		header_cmd_encode(full_packet, &hdr);
 
-	NRF_RPC_DBG("Sending command 0x%02X from group 0x%02X", cmd,
-		    group->data->src_group_id);
+		old_handler = cmd_ctx->handler;
+		old_handler_data = cmd_ctx->handler_data;
+		cmd_ctx->handler = handler;
+		cmd_ctx->handler_data = handler_data;
 
-	err = send(group, full_packet, len + NRF_RPC_HEADER_SIZE);
+		NRF_RPC_DBG("Sending command 0x%02X from group 0x%02X", cmd,
+			    group->data->src_group_id);
 
-	if (err >= 0) {
-		err = wait_for_response(group, cmd_ctx, rsp_packet, rsp_len);
-	}
+		err = send(group, full_packet, len + NRF_RPC_HEADER_SIZE);
 
-	cmd_ctx->handler = old_handler;
-	cmd_ctx->handler_data = old_handler_data;
+		if (err >= 0) {
+			err = wait_for_response(group, cmd_ctx, rsp_packet, rsp_len);
+		}
 
-	cmd_ctx_release(cmd_ctx);
+		cmd_ctx->handler = old_handler;
+		cmd_ctx->handler_data = old_handler_data;
+
+		cmd_ctx_release(cmd_ctx);
+	}
 
 	return err;
 }
@@ -1121,6 +1158,8 @@ int nrf_rpc_init(nrf_rpc_err_handler_t err_handler)
 		return 0;
 	}
 
+	nrf_rpc_os_mutex_init(&cleanup_mutex);
+
 	global_err_handler = err_handler;
 
 	for (NRF_RPC_AUTO_ARR_FOR(iter, group, &nrf_rpc_groups_array,
@@ -1195,6 +1234,50 @@ int nrf_rpc_init(nrf_rpc_err_handler_t err_handler)
 	return err;
 }
 
+void nrf_rpc_register_cleanup_handler(struct nrf_rpc_cleanup_handler *handler)
+{
+	nrf_rpc_os_mutex_lock(&cleanup_mutex);
+
+	struct nrf_rpc_cleanup_handler *current;
+
+	/* Make sure the handler is not added twice. */
+	for (current = cleanup_handlers; current != NULL; current = current->next) {
+		if (current == handler) {
+			break;
+		}
+	}
+
+	if (current == NULL) {
+		handler->next = cleanup_handlers;
+		cleanup_handlers = handler;
+	}
+
+	nrf_rpc_os_mutex_unlock(&cleanup_mutex);
+}
+
+void nrf_rpc_stop(bool cleanup)
+{
+	nrf_rpc_os_mutex_lock(&cleanup_mutex);
+
+	is_stopped = true;
+	abort_all_ops();
+	if (cleanup) {
+		struct nrf_rpc_cleanup_handler *current;
+		for (current = cleanup_handlers; current != NULL; current = current->next) {
+			if (current->handler != NULL) {
+				current->handler(current->context);
+			}
+		}
+	}
+
+	nrf_rpc_os_mutex_unlock(&cleanup_mutex);
+}
+
+void nrf_rpc_resume(void)
+{
+	is_stopped = false;
+}
+
 int nrf_rpc_cmd(const struct nrf_rpc_group *group, uint8_t cmd, uint8_t *packet, size_t len,
 		nrf_rpc_handler_t handler, void *handler_data)
 {
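---
Usage sketch (illustrative only, not part of the patch): one way a consumer module might use the
new API. The my_module_* names and the peer-reset step are placeholders; only
nrf_rpc_register_cleanup_handler(), nrf_rpc_stop() and nrf_rpc_resume() come from this change.

	#include <nrf_rpc.h>

	static void my_module_cleanup(void *context)
	{
		/* Undo any state set up over RPC, for example deregister callbacks. */
	}

	/* The handler structure must remain valid after registration, so give it static storage. */
	static struct nrf_rpc_cleanup_handler my_cleanup_handler = {
		.handler = my_module_cleanup,
		.context = NULL,
	};

	void my_module_init(void)
	{
		nrf_rpc_register_cleanup_handler(&my_cleanup_handler);
	}

	void my_module_restart_peer(void)
	{
		/* Fail all pending commands and run the registered cleanup handlers. */
		nrf_rpc_stop(true);

		/* With cleanup set to true, it may be necessary to reset the peer here
		 * before communication is resumed (see the nrf_rpc_stop documentation).
		 */

		nrf_rpc_resume();
	}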