diff --git a/src/arch/host/lib/ams.c b/src/arch/host/lib/ams.c new file mode 100644 index 000000000..946611d26 --- /dev/null +++ b/src/arch/host/lib/ams.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: BSD-3-Clause +// +// Copyright(c) 2023 Intel Corporation. All rights reserved. +// +// Author: Krzysztof Frydryk + +/** + * \file + * \brief Xtensa Asynchronous Messaging Service implementation file + * \authors Krzysztof Frydryk + */ + +#include +#include +#include + +static struct async_message_service *host_ams; + +struct async_message_service **arch_ams_get(void) +{ + return &host_ams; +} diff --git a/src/arch/xtensa/lib/CMakeLists.txt b/src/arch/xtensa/lib/CMakeLists.txt index 8112056bf..f31326299 100644 --- a/src/arch/xtensa/lib/CMakeLists.txt +++ b/src/arch/xtensa/lib/CMakeLists.txt @@ -2,6 +2,10 @@ add_local_sources(sof notifier.c) +if (CONFIG_AMS) + add_local_sources(sof ams.c) +endif() + if (CONFIG_MULTICORE) add_local_sources(sof cpu.c) endif() diff --git a/src/arch/xtensa/lib/ams.c b/src/arch/xtensa/lib/ams.c new file mode 100644 index 000000000..87fc1f482 --- /dev/null +++ b/src/arch/xtensa/lib/ams.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: BSD-3-Clause +// +// Copyright(c) 2023 Intel Corporation. All rights reserved. 
+// +// Author: Krzysztof Frydryk + +/** + * \file + * \brief Xtensa Asynchronous Messaging Service implementation file + * \authors Krzysztof Frydryk + */ + +#include +#include +#include + +struct async_message_service **arch_ams_get(void) +{ +#if CONFIG_AMS + struct core_context *ctx = (struct core_context *)cpu_read_threadptr(); + + return &ctx->ams; +#else + return NULL; +#endif +} diff --git a/src/arch/xtensa/xtos/xtos-structs.h b/src/arch/xtensa/xtos/xtos-structs.h index aed01254e..90f7410f0 100644 --- a/src/arch/xtensa/xtos/xtos-structs.h +++ b/src/arch/xtensa/xtos/xtos-structs.h @@ -54,6 +54,9 @@ struct core_context { struct task *main_task; struct schedulers *schedulers; struct notify *notify; +#ifdef CONFIG_AMS + struct async_message_service *ams; +#endif struct idc *idc; }; diff --git a/src/idc/idc.c b/src/idc/idc.c index f81e5a4ec..3f5ef1f57 100644 --- a/src/idc/idc.c +++ b/src/idc/idc.c @@ -32,6 +32,7 @@ #include #include #include +#include LOG_MODULE_REGISTER(idc, CONFIG_SOF_LOG_LEVEL); @@ -274,6 +275,15 @@ static void idc_prepare_d0ix(void) platform_pm_runtime_prepare_d0ix_en(cpu_get_id()); } +static void idc_process_async_msg(uint32_t slot) +{ +#if CONFIG_AMS + process_incoming_message(slot); +#else + tr_err(&idc_tr, "idc_cmd(): AMS not enabled"); +#endif +} + /** * \brief Handle IDC secondary core crashed message. 
* \param[in] header IDC message header @@ -335,6 +345,9 @@ void idc_cmd(struct idc_msg *msg) case iTS(IDC_MSG_SECONDARY_CORE_CRASHED): idc_secondary_core_crashed(msg->header); break; + case iTS(IDC_MSG_AMS): + idc_process_async_msg(IDC_HEADER_TO_AMS_SLOT_MASK(msg->header)); + break; default: tr_err(&idc_tr, "idc_cmd(): invalid msg->header = %u", msg->header); diff --git a/src/include/sof/lib/ams.h b/src/include/sof/lib/ams.h new file mode 100644 index 000000000..8cb3ae217 --- /dev/null +++ b/src/include/sof/lib/ams.h @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* + * Copyright (c) 2023 Intel Corporation + * + * Author: Krzysztof Frydryk + */ + +#ifndef __SOF_LIB_AMS_H__ +#define __SOF_LIB_AMS_H__ + +#include +#include +#include +#include +#include +#include + +/* Reserved value "does not exist" or "unassigned" value for msg types */ +#define AMS_INVALID_MSG_TYPE 0 +/* Reserved value "does not exist" or "unassigned" value for slots */ +#define AMS_INVALID_SLOT 0xFF +/* Wildcard for module_id and instance_id values */ +#define AMS_ANY_ID 0xFFFF + +/* max number of message UUIDs */ +#define AMS_SERVICE_UUID_TABLE_SIZE 16 +/* max number of async message routes */ +#define AMS_ROUTING_TABLE_SIZE 16 +/* Space allocated for async message content*/ +#define AMS_MAX_MSG_SIZE 0x1000 + +/* Size of slots message, module id and instance id */ +#define AMS_SLOT_SIZE(msg) (AMS_MESSAGE_SIZE(msg) + sizeof(uint16_t) * 2) +#define AMS_MESSAGE_SIZE(msg) (sizeof(*msg) - sizeof(char) + (sizeof(char) * (msg->message_length))) + +/** + * \brief IXC message payload + * + * ams_message_payload - contains the actual Async Msg payload + */ +struct ams_message_payload { + /* Message IDs are assigned dynamically on new message entry creation + * For a new payload should be acquired by ams_get_message_type_id + */ + uint32_t message_type_id; + /* Producers module ID */ + uint16_t producer_module_id; + /* Producers instance ID */ + uint16_t producer_instance_id; + /* Message 
length */ + uint32_t message_length; + /* Message payload */ + uint8_t *message; +}; + +struct ams_slot { + uint16_t module_id; + uint16_t instance_id; + union { + struct ams_message_payload msg; + uint8_t msg_raw[AMS_MAX_MSG_SIZE]; + } u; + uint32_t __aligned(PLATFORM_DCACHE_ALIGN) pad[]; +}; + +/** + * \brief ams_msg_callback_fn + * + * Each subscriber provides this handler function for each message ID + */ +typedef void (*ams_msg_callback_fn)(const struct ams_message_payload *const ams_message_payload, + void *ctx); + +/** + * \brief Internal struct ams_consumer_entry + * + * Describes a single consumer's subscription to a single message. + * Array of 'ams_consumer_entry' structs forms AsyncMessageService's routing + * table which allows for message dispatch. + */ +struct ams_consumer_entry { + /* Message ID that will be routed via this entry */ + uint32_t message_type_id; + /* Callback provided by the subscribed consumer */ + ams_msg_callback_fn consumer_callback; + /* Additional context for consumer_callback (optional) */ + void *ctx; + /* Subscribed consumer's Module ID */ + uint16_t consumer_module_id; + /* Subscribed consumer's Module Instance ID */ + uint8_t consumer_instance_id; + /* Subscribed consumer's Module core id. 
Saved to speed up routing */ + uint8_t consumer_core_id; +}; + +struct ams_producer { + /* Message ID that will be routed via this entry */ + uint32_t message_type_id; + /* Subscribed producer's Module ID */ + uint16_t producer_module_id; + /* Subscribed producer's Module Instance ID */ + uint8_t producer_instance_id; +}; + +struct uuid_idx { + uint32_t message_type_id; + uint8_t message_uuid[UUID_SIZE]; +}; + +struct ams_shared_context { + /* should be only used with ams_acquire/release function, not generic ones */ + struct coherent c; + + uint32_t last_used_msg_id; + struct ams_consumer_entry rt_table[AMS_ROUTING_TABLE_SIZE]; + struct ams_producer producer_table[AMS_ROUTING_TABLE_SIZE]; + struct uuid_idx uuid_table[AMS_SERVICE_UUID_TABLE_SIZE]; + + uint32_t slot_uses[CONFIG_CORE_COUNT]; + /* marks which core already processed slot */ + uint32_t slot_done[CONFIG_CORE_COUNT]; + + struct ams_slot slots[CONFIG_CORE_COUNT]; +}; + +struct ams_context { + /* shared context must be always accessed with shared->c taken */ + struct ams_shared_context *shared; +}; + +struct ams_task { + struct task ams_task; + struct async_message_service *ams; + uint32_t pending_slots; +}; + +struct async_message_service { +#if CONFIG_SMP + struct ams_task ams_task; +#endif /* CONFIG_SMP */ + struct ams_context *ams_context; +}; + +#if CONFIG_AMS +int ams_init(void); + +/** + * \brief Get Message Type ID + * + * assigns and returns a message type ID for specified message UUID. + * The value of message type ID is dynamically assigned and it will change between runs. + * + * \param[in] message_uuid UUID of message type + * \param[in] message_type_id Unique message type ID assigned by AMS + */ +int ams_get_message_type_id(const uint8_t *message_uuid, + uint32_t *message_type_id); + +/** + * \brief Producer Register + * + * registers a producer of asynchronous messages of given message type. 
+ * When a module instance calls this function, + * it informs the Asynchronous Messaging Service that it will be sending asynchronous messages. + * + * \param[in] message_type_id unique message type ID assigned during ams_get_message_type_id + * \param[in] module_id Module ID of module calling function + * \param[in] instance_id Instance ID of module calling function + */ +int ams_register_producer(uint32_t message_type_id, + uint16_t module_id, + uint16_t instance_id); + +/** + * \brief Producer Unregister + * + * unregisters a producer of asynchronous messages of given type. + * When a module instance calls this function, + * it informs the Asynchronous Messaging Service + * that it will not be sending asynchronous messages anymore. + * + * \param[in] message_type_id unique message type ID assigned during ams_get_message_type_id + * \param[in] module_id Module ID of module calling function + * \param[in] instance_id Instance ID of module calling function + */ +int ams_unregister_producer(uint32_t message_type_id, + uint16_t module_id, + uint16_t instance_id); + +/** + * \brief Register Consumer + * + * Registers a module instance as a consumer of specified message type. + * When specified message type is sent, + * a callback is called that was provided during registration process. + * + * The consumer callback is triggered when ams_send function was used to send a message + * and/or when ams_send_mi function with consumer's module ID and instance ID was used + * to send a message. 
+ * + * \param[in] message_type_id unique message type ID assigned during ams_get_message_type_id + * \param[in] module_id Module ID of module calling function + * \param[in] instance_id Instance ID of module calling function + * \param[in] function callback that should be called when message is received + * \param[in] ctx Optional context that is passed to callback + */ +int ams_register_consumer(uint32_t message_type_id, + uint16_t module_id, + uint16_t instance_id, + ams_msg_callback_fn function, + void *ctx); + +/** + * \brief Unregister Consumer + * + * Unregisters a consumer of specified message type + * + * \param[in] message_type_id unique message type ID assigned during ams_get_message_type_id + * \param[in] module_id Module ID of module calling function + * \param[in] instance_id Instance ID of module calling function + * \param[in] function callback that should be called when message is received + */ +int ams_unregister_consumer(uint32_t message_type_id, + uint16_t module_id, + uint16_t instance_id, + ams_msg_callback_fn function); + +/** + * \brief Message Send + * + * Sends asynchronous message to all registered consumers by registered producer. + * The consumers registered on the same core may be called in context of a message producer + * + * \param[in] payload Message payload + */ +int ams_send(const struct ams_message_payload *payload); + +/** + * \brief Message Send to Module Instance + * + * Sends asynchronous message to specified module instance. 
+ * The consumer registered on the same core may be called in context of a message producer + * + * \param[in] payload Message payload + * \param[in] module_id Module ID of consumer that messages is sent to + * \param[in] instance_id Instance ID of consumer that messages is sent to + */ +int ams_send_mi(const struct ams_message_payload *payload, + uint16_t module_id, uint16_t instance_id); + +static inline struct ams_shared_context *ams_ctx_get(void) +{ + return sof_get()->ams_shared_ctx; +} +#else +static inline int ams_init(void) { return 0; } +static inline int ams_get_message_type_id(const uint8_t *message_uuid, + uint32_t *message_type_id) { return 0; } + +static inline int ams_register_producer(uint32_t message_type_id, + uint16_t module_id, + uint16_t instance_id) { return 0; } + +static inline int ams_unregister_producer(uint32_t message_type_id, + uint16_t module_id, + uint16_t instance_id) { return 0; } + +static inline int ams_register_consumer(uint32_t message_type_id, + uint16_t module_id, + uint16_t instance_id, + ams_msg_callback_fn function, + void *ctx) { return 0; } + +static inline int ams_unregister_consumer(uint32_t message_type_id, + uint16_t module_id, + uint16_t instance_id, + ams_msg_callback_fn function) { return 0; } + +static inline int ams_send(const struct ams_message_payload *payload) { return 0; } + +static inline int ams_send_mi(const struct ams_message_payload *payload, uint16_t module_id, + uint16_t instance_id) { return 0; } + +static inline struct ams_shared_context *ams_ctx_get(void) +{ + return NULL; +} + +#endif /* CONFIG_AMS */ + +#if CONFIG_SMP && CONFIG_AMS +int process_incoming_message(uint32_t slot); +#else +static inline int process_incoming_message(uint32_t slot) { return 0; } +#endif /* CONFIG_SMP && CONFIG_AMS */ + +struct async_message_service **arch_ams_get(void); + +#endif /* __SOF_LIB_AMS_H__ */ diff --git a/src/init/init.c b/src/init/init.c index e7368a2c3..63339d2af 100644 --- a/src/init/init.c +++ 
b/src/init/init.c @@ -38,6 +38,7 @@ #include #include #endif +#include LOG_MODULE_REGISTER(init, CONFIG_SOF_LOG_LEVEL); @@ -188,6 +189,12 @@ int secondary_core_init(struct sof *sof) if (err < 0) return err; +#if CONFIG_AMS + err = ams_init(); + if (err < 0) + return err; +#endif + trace_point(TRACE_BOOT_PLATFORM); #ifndef __ZEPHYR__ @@ -272,6 +279,11 @@ static int primary_core_init(int argc, char *argv[], struct sof *sof) if (platform_init(sof) < 0) sof_panic(SOF_IPC_PANIC_PLATFORM); +#if CONFIG_AMS + if (ams_init()) + LOG_ERR("AMS Init failed!"); +#endif + #if CONFIG_IPC_MAJOR_4 /* Set current abi version of the IPC4 FwRegisters layout */ size_t ipc4_abi_ver_offset = offsetof(struct ipc4_fw_registers, abi_ver); diff --git a/src/lib/CMakeLists.txt b/src/lib/CMakeLists.txt index c921824a3..bfcb2613c 100644 --- a/src/lib/CMakeLists.txt +++ b/src/lib/CMakeLists.txt @@ -25,3 +25,7 @@ add_local_sources(sof wait.c cpu-clk-manager.c ) + +if(CONFIG_AMS) +add_local_sources(sof ams.c) +endif() diff --git a/src/lib/ams.c b/src/lib/ams.c new file mode 100644 index 000000000..36252ce80 --- /dev/null +++ b/src/lib/ams.c @@ -0,0 +1,610 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2023 Intel Corporation + * + * Author: Krzysztof Frydryk + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +LOG_MODULE_REGISTER(ams, CONFIG_SOF_LOG_LEVEL); + +DECLARE_SOF_UUID("ams", ams_uuid, 0xea9c4bca, 0x5b7d, 0x48c6, + 0x95, 0x86, 0x55, 0x3e, 0x27, 0x23, 0x5b, 0xeb); + +DECLARE_TR_CTX(ams_tr, SOF_UUID(ams_uuid), LOG_LEVEL_INFO); + +static struct ams_context ctx[CONFIG_CORE_COUNT]; + +static struct ams_shared_context __sparse_cache *ams_acquire(struct ams_shared_context *shared) +{ + struct coherent __sparse_cache *c = coherent_acquire(&shared->c, + sizeof(*shared)); + + return attr_container_of(c, struct ams_shared_context __sparse_cache, + 
c, __sparse_cache); +} + +static void ams_release(struct ams_shared_context __sparse_cache *shared) +{ + coherent_release(&shared->c, sizeof(*shared)); +} + +static struct uuid_idx __sparse_cache *ams_find_uuid_entry_by_uuid(struct ams_shared_context __sparse_cache *ctx_shared, + uint8_t const *uuid) +{ + unsigned int index; + struct uuid_idx __sparse_cache *uuid_table = ctx_shared->uuid_table; + + if (!uuid) + return NULL; + + /* try to find existing entry */ + for (index = 0; index < AMS_SERVICE_UUID_TABLE_SIZE; index++) { + if (memcmp((void *)uuid_table[index].message_uuid, uuid, UUID_SIZE) == 0) + return &uuid_table[index]; + } + + /* and add new one if needed */ + for (index = 0; index < AMS_SERVICE_UUID_TABLE_SIZE; index++) { + if (uuid_table[index].message_type_id == AMS_INVALID_MSG_TYPE) { + int ec = memcpy_s((void *)uuid_table[index].message_uuid, + sizeof(uuid_table[index].message_uuid), + uuid, UUID_SIZE); + if (ec != 0) { + tr_err(&ams_tr, "Failed to create UUID entry: %u", index); + return NULL; + } + + uuid_table[index].message_type_id = ++ctx_shared->last_used_msg_id; + return &uuid_table[index]; + } + } + + tr_err(&ams_tr, "No space to create UUID entry"); + return NULL; +} + +int ams_get_message_type_id(const uint8_t *message_uuid, + uint32_t *message_type_id) +{ + struct async_message_service *ams = *arch_ams_get(); + struct uuid_idx __sparse_cache *uuid_entry; + struct ams_shared_context __sparse_cache *shared_c; + + if (!ams->ams_context) + return -EINVAL; + + *message_type_id = AMS_INVALID_MSG_TYPE; + + shared_c = ams_acquire(ams->ams_context->shared); + + uuid_entry = ams_find_uuid_entry_by_uuid(shared_c, message_uuid); + if (!uuid_entry) { + ams_release(shared_c); + return -EINVAL; + } + + *message_type_id = uuid_entry->message_type_id; + ams_release(shared_c); + + return 0; +} + +static int ams_find_uuid_index_by_msg_type_id(struct ams_shared_context __sparse_cache *ctx_shared, + uint32_t const message_type_id) +{ + struct uuid_idx 
__sparse_cache *iter; + + if (message_type_id == AMS_INVALID_MSG_TYPE) + return -EINVAL; + + for (int i = 0; i < AMS_SERVICE_UUID_TABLE_SIZE; i++) { + iter = &ctx_shared->uuid_table[i]; + + /* we got the id */ + if (message_type_id == iter->message_type_id) + return i; + } + + return -ENOENT; +} + +int ams_register_producer(uint32_t message_type_id, + uint16_t module_id, + uint16_t instance_id) +{ + struct async_message_service *ams = *arch_ams_get(); + struct ams_producer __sparse_cache *producer_table; + struct ams_shared_context __sparse_cache *shared_c; + int idx; + int err = -EINVAL; + + if (!ams->ams_context) + return -EINVAL; + + shared_c = ams_acquire(ams->ams_context->shared); + + idx = ams_find_uuid_index_by_msg_type_id(shared_c, message_type_id); + if (idx < 0) { + ams_release(shared_c); + return -EINVAL; + } + + producer_table = shared_c->producer_table; + for (int iter = 0; iter < AMS_ROUTING_TABLE_SIZE; iter++) { + /* Search for first invalid entry */ + if (producer_table[iter].message_type_id == AMS_INVALID_MSG_TYPE) { + producer_table[iter].message_type_id = message_type_id; + producer_table[iter].producer_module_id = module_id; + producer_table[iter].producer_instance_id = instance_id; + + /* Exit loop since we added new entry */ + err = 0; + break; + } + } + + ams_release(shared_c); + return err; +} + +int ams_unregister_producer(uint32_t message_type_id, + uint16_t module_id, + uint16_t instance_id) +{ + struct async_message_service *ams = *arch_ams_get(); + struct ams_producer __sparse_cache *producer_table; + struct ams_shared_context __sparse_cache *shared_c; + int idx; + int err = -EINVAL; + + if (!ams->ams_context) + return -EINVAL; + + shared_c = ams_acquire(ams->ams_context->shared); + + idx = ams_find_uuid_index_by_msg_type_id(shared_c, message_type_id); + if (idx < 0) { + ams_release(shared_c); + return -EINVAL; + } + + producer_table = shared_c->producer_table; + for (int iter = 0; iter < AMS_ROUTING_TABLE_SIZE; iter++) { + if 
((producer_table[iter].message_type_id == message_type_id) && + (producer_table[iter].producer_instance_id == instance_id) && + (producer_table[iter].producer_module_id == module_id)) { + producer_table[iter].message_type_id = AMS_INVALID_MSG_TYPE; + + /* Exit loop since we removed the entry */ + err = 0; + break; + } + } + + ams_release(shared_c); + return err; +} + +int ams_register_consumer(uint32_t message_type_id, + uint16_t module_id, + uint16_t instance_id, + ams_msg_callback_fn function, + void *ctx) +{ + struct async_message_service *ams = *arch_ams_get(); + struct ams_consumer_entry __sparse_cache *routing_table; + struct ams_shared_context __sparse_cache *shared_c; + int err = -EINVAL; + + if (!ams->ams_context || !function) + return -EINVAL; + + shared_c = ams_acquire(ams->ams_context->shared); + + routing_table = shared_c->rt_table; + for (int iter = 0; iter < AMS_ROUTING_TABLE_SIZE; iter++) { + /* Search for first invalid entry */ + if (routing_table[iter].message_type_id == AMS_INVALID_MSG_TYPE) { + /* Add entry to routing table for local service */ + routing_table[iter].consumer_callback = function; + routing_table[iter].message_type_id = message_type_id; + routing_table[iter].consumer_instance_id = instance_id; + routing_table[iter].consumer_module_id = module_id; + routing_table[iter].consumer_core_id = cpu_get_id(); + routing_table[iter].ctx = ctx; + + /* Exit loop since we added new entry */ + err = 0; + break; + } + } + + ams_release(shared_c); + return err; +} + +int ams_unregister_consumer(uint32_t message_type_id, + uint16_t module_id, + uint16_t instance_id, + ams_msg_callback_fn function) +{ + struct async_message_service *ams = *arch_ams_get(); + struct ams_consumer_entry __sparse_cache *routing_table; + struct ams_shared_context __sparse_cache *shared_c; + int err = -EINVAL; + + if (!ams->ams_context) + return -EINVAL; + + shared_c = ams_acquire(ams->ams_context->shared); + routing_table = shared_c->rt_table; + for (int iter = 0; iter < 
AMS_ROUTING_TABLE_SIZE; iter++) { + /* Search for required entry */ + if ((routing_table[iter].message_type_id == message_type_id) && + (routing_table[iter].consumer_module_id == module_id) && + (routing_table[iter].consumer_instance_id == instance_id) && + (routing_table[iter].consumer_callback == function)) { + /* Remove this entry from routing table */ + routing_table[iter].message_type_id = AMS_INVALID_MSG_TYPE; + routing_table[iter].consumer_callback = NULL; + + /* Exit loop since we removed entry */ + err = 0; + break; + } + } + + ams_release(shared_c); + return err; +} + +static uint32_t ams_push_slot(struct ams_shared_context __sparse_cache *ctx_shared, + const struct ams_message_payload *msg, + uint16_t module_id, uint16_t instance_id) +{ + int err; + + for (uint32_t i = 0; i < ARRAY_SIZE(ctx_shared->slots); ++i) { + if (ctx_shared->slot_uses[i] == 0) { + err = memcpy_s((void *)ctx_shared->slots[i].u.msg_raw, + sizeof(ctx_shared->slots[i].u.msg_raw), + msg, AMS_MESSAGE_SIZE(msg)); + + if (err != 0) + return AMS_INVALID_SLOT; + + ctx_shared->slots[i].module_id = module_id; + ctx_shared->slots[i].instance_id = instance_id; + ctx_shared->slot_done[i] = 0; + + return i; + } + } + + return AMS_INVALID_SLOT; +} + +static int ams_get_ixc_route_to_target(int source_core, int target_core) +{ + if (source_core >= CONFIG_CORE_COUNT || target_core >= CONFIG_CORE_COUNT) + return -EINVAL; + /* core 0 can target any core */ + if (source_core == PLATFORM_PRIMARY_CORE_ID) + return target_core; + /* other cores must proxy through main core */ + return source_core == target_core ? 
target_core : PLATFORM_PRIMARY_CORE_ID; +} + +static int send_message_over_ixc(struct async_message_service *ams, uint32_t slot, + struct ams_consumer_entry *target) +{ + if (!target) + return -EINVAL; + + int ixc_route = ams_get_ixc_route_to_target(cpu_get_id(), + target->consumer_core_id); + + struct idc_msg ams_request = { + .header = IDC_MSG_AMS | slot, + .extension = IDC_MSG_AMS_EXT, + .core = ixc_route, + .size = 0, + .payload = NULL}; + + /* send IDC message */ + return idc_send_msg(&ams_request, IDC_NON_BLOCKING); +} + +static int ams_send_over_ixc(struct async_message_service *ams, uint32_t slot, + struct ams_consumer_entry *target) +{ +#if CONFIG_SMP + return send_message_over_ixc(ams, slot, target); +#else + return -EINVAL; +#endif +} + +static int ams_message_send_internal(struct async_message_service *ams, + const struct ams_message_payload *const ams_message_payload, + uint16_t module_id, uint16_t instance_id, + uint32_t incoming_slot) +{ + bool found_any = false; + bool incoming = (incoming_slot != AMS_INVALID_SLOT); + struct ams_consumer_entry __sparse_cache *routing_table; + struct ams_shared_context __sparse_cache *shared_c; + uint32_t forwarded = 0; + uint32_t slot; + struct ams_consumer_entry ams_target; + int ixc_route; + int cpu_id; + int err = 0; + + if (!ams->ams_context || !ams_message_payload) + return -EINVAL; + + shared_c = ams_acquire(ams->ams_context->shared); + cpu_id = cpu_get_id(); + + if (incoming) + shared_c->slot_done[incoming_slot] |= BIT(cpu_id); + + routing_table = shared_c->rt_table; + + for (int iter = 0; iter < AMS_ROUTING_TABLE_SIZE; iter++) { + slot = AMS_INVALID_SLOT; + + /* Search for required entry */ + if (routing_table[iter].message_type_id != ams_message_payload->message_type_id) + continue; + + /* check if we want to limit to specific module* */ + if (module_id != AMS_ANY_ID && instance_id != AMS_ANY_ID) { + if (routing_table[iter].consumer_module_id != module_id || + routing_table[iter].consumer_instance_id != 
instance_id) { + continue; + } + } + + found_any = true; + ams_target = routing_table[iter]; + ixc_route = ams_get_ixc_route_to_target(cpu_id, + ams_target.consumer_core_id); + + if (ixc_route == cpu_id) { + /* we are on target core already */ + /* release lock here, callback are NOT supposed to change routing_table */ + ams_release(shared_c); + + ams_target.consumer_callback(ams_message_payload, ams_target.ctx); + err = 0; + } else { + /* we have to go through idc */ + if (incoming) { + /* if bit is set we are forwarding it again */ + if (shared_c->slot_done[incoming_slot] & BIT(ams_target.consumer_core_id)) { + /* slot was already processed for that core, skip it */ + continue; + } + } else { + slot = ams_push_slot(shared_c, + ams_message_payload, module_id, + instance_id); + if (slot == AMS_INVALID_SLOT) { + ams_release(shared_c); + return -EINVAL; + } + } + if ((forwarded & BIT(ams_target.consumer_core_id)) == 0) { + /* bump uses count, mark current as processed already */ + if (slot != AMS_INVALID_SLOT) { + shared_c->slot_uses[slot]++; + shared_c->slot_done[slot] |= BIT(cpu_id); + } + + /* release lock here, so other core can acquire it again */ + ams_release(shared_c); + + if (slot != AMS_INVALID_SLOT) { + forwarded |= BIT(ams_target.consumer_core_id); + err = ams_send_over_ixc(ams, slot, &ams_target); + if (err != 0) { + /* idc not sent, update slot refs locally */ + shared_c = ams_acquire(ams->ams_context->shared); + shared_c->slot_uses[slot]--; + shared_c->slot_done[slot] |= BIT(ams_target.consumer_core_id); + ams_release(shared_c); + } + } + } else { + /* message already forwarded, nothing to do here */ + ams_release(shared_c); + } + } + + /* acquire shared context lock again */ + shared_c = ams_acquire(ams->ams_context->shared); + } + + if (incoming) + shared_c->slot_uses[incoming_slot]--; + + ams_release(shared_c); + + if (!found_any) + tr_err(&ams_tr, "No entries found!"); + + return err; +} + +int ams_send(const struct ams_message_payload *const 
ams_message_payload) +{ + struct async_message_service *ams = *arch_ams_get(); + + return ams_message_send_internal(ams, ams_message_payload, AMS_ANY_ID, AMS_ANY_ID, + AMS_INVALID_SLOT); +} + +int ams_message_send_mi(struct async_message_service *ams, + const struct ams_message_payload *const ams_message_payload, + uint16_t target_module, uint16_t target_instance) +{ + return ams_message_send_internal(ams, ams_message_payload, target_module, + target_instance, AMS_INVALID_SLOT); +} + +int ams_send_mi(const struct ams_message_payload *const ams_message_payload, + uint16_t module_id, uint16_t instance_id) +{ + struct async_message_service *ams = *arch_ams_get(); + + return ams_message_send_mi(ams, ams_message_payload, module_id, instance_id); +} + +static int ams_process_slot(struct async_message_service *ams, uint32_t slot) +{ + struct ams_shared_context __sparse_cache *shared_c; + struct ams_message_payload msg; + uint16_t module_id; + uint16_t instance_id; + + shared_c = ams_acquire(ams->ams_context->shared); + + msg = shared_c->slots[slot].u.msg; + module_id = shared_c->slots[slot].module_id; + instance_id = shared_c->slots[slot].instance_id; + + ams_release(shared_c); + tr_info(&ams_tr, "ams_process_slot slot %d msg %d from 0x%08x", + slot, msg.message_type_id, + msg.producer_module_id << 16 | msg.producer_instance_id); + + return ams_message_send_internal(ams, &msg, module_id, instance_id, slot); +} + +#if CONFIG_SMP + +static void ams_task_add_slot_to_process(struct ams_task *ams_task, uint32_t slot) +{ + int flags; + + irq_local_disable(flags); + ams_task->pending_slots |= BIT(slot); + irq_local_enable(flags); +} + +int process_incoming_message(uint32_t slot) +{ + struct async_message_service *ams = *arch_ams_get(); + struct ams_task *task = &ams->ams_task; + + ams_task_add_slot_to_process(task, slot); + + return schedule_task(&task->ams_task, 0, 10000); +} + +#endif /* CONFIG_SMP */ + +/* ams task */ + +static enum task_state process_message(void *arg) +{ + 
struct ams_task *ams_task = arg; + uint32_t slot; + int flags; + + if (ams_task->pending_slots == 0) { + tr_err(&ams_tr, "Could not process message! Skipping."); + return SOF_TASK_STATE_COMPLETED; + } + + slot = 31 - clz(ams_task->pending_slots); + + ams_process_slot(ams_task->ams, slot); + + /* only done on main core, irq disabling is enough */ + irq_local_disable(flags); + ams_task->pending_slots &= ~BIT(slot); + irq_local_enable(flags); + schedule_task_cancel(&ams_task->ams_task); + + return SOF_TASK_STATE_COMPLETED; +} + +static int ams_task_init(void) +{ + int ret; + struct async_message_service *ams = *arch_ams_get(); + struct ams_task *task = &ams->ams_task; + + task->ams = ams; + + ret = schedule_task_init_ll(&task->ams_task, SOF_UUID(ams_uuid), SOF_SCHEDULE_LL_TIMER, + SOF_TASK_PRI_MED, process_message, &ams->ams_task, cpu_get_id(), 0); + if (ret) + tr_err(&ams_tr, "Could not init AMS task!"); + + return ret; +} + +static int ams_create_shared_context(struct ams_shared_context *ctx) +{ + struct ams_shared_context __sparse_cache *shared_c; + + shared_c = ams_acquire(ctx); + shared_c->last_used_msg_id = AMS_INVALID_MSG_TYPE; + ams_release(shared_c); + + return 0; +} + +int ams_init(void) +{ + struct ams_shared_context *ams_shared_ctx; + struct async_message_service **ams = arch_ams_get(); + struct sof *sof; + int ret = 0; + + *ams = rzalloc(SOF_MEM_ZONE_SYS, SOF_MEM_FLAG_COHERENT, SOF_MEM_CAPS_RAM, + sizeof(**ams)); + + (*ams)->ams_context = &ctx[cpu_get_id()]; + memset((*ams)->ams_context, 0, sizeof(*(*ams)->ams_context)); + + if (cpu_get_id() == PLATFORM_PRIMARY_CORE_ID) { + sof = sof_get(); + sof->ams_shared_ctx = coherent_init(struct ams_shared_context, c); + coherent_shared(sof->ams_shared_ctx, c); + } + + ams_shared_ctx = ams_ctx_get(); + (*ams)->ams_context->shared = ams_shared_ctx; + + ams_create_shared_context((*ams)->ams_context->shared); + +#if CONFIG_SMP + ret = ams_task_init(); +#endif /* CONFIG_SMP */ + + return ret; +} diff --git 
a/src/platform/Kconfig b/src/platform/Kconfig index 173951de5..13ac3c41f 100644 --- a/src/platform/Kconfig +++ b/src/platform/Kconfig @@ -471,6 +471,13 @@ config HAVE_AGENT with DMA based scheduling, where asynchronous interrupts can potentially starve the agent. +config AMS + bool "Enable Async Messaging Service" + default n + help + Enables Async Messaging Service. + Async messages are used to send messages between modules. + config AGENT_PANIC_ON_DELAY bool "Enable system agent time verification panic" default n diff --git a/xtos/include/rtos/idc.h b/xtos/include/rtos/idc.h index 931cd8952..4b3ece377 100644 --- a/xtos/include/rtos/idc.h +++ b/xtos/include/rtos/idc.h @@ -97,6 +97,12 @@ #define IDC_MSG_SECONDARY_CORE_CRASHED IDC_TYPE(0xA) #define IDC_MSG_SECONDARY_CORE_CRASHED_EXT(x) IDC_EXTENSION(x) +/** \brief IDC process async msg */ +#define IDC_MSG_AMS IDC_TYPE(0xB) +#define IDC_MSG_AMS_EXT IDC_EXTENSION(0x0) + +#define IDC_HEADER_TO_AMS_SLOT_MASK(x) (x & 0xFFFF) + /** \brief IDC_MSG_SECONDARY_CORE_CRASHED header fields. 
*/ #define IDC_SCC_CORE_SHIFT 0 #define IDC_SCC_CORE_MASK 0xff diff --git a/xtos/include/rtos/sof.h b/xtos/include/rtos/sof.h index 6067176cf..4214122c9 100644 --- a/xtos/include/rtos/sof.h +++ b/xtos/include/rtos/sof.h @@ -23,6 +23,7 @@ struct ipc; struct ll_schedule_domain; struct mm; struct mn; +struct ams_shared_context; struct notify_data; struct pm_runtime_data; struct sa; @@ -76,6 +77,11 @@ struct sof { /* runtime power management data */ struct pm_runtime_data *prd; +#ifdef CONFIG_AMS + /* asynchronous messaging service */ + struct ams_shared_context *ams_shared_ctx; +#endif + /* shared notifier data */ struct notify_data *notify_data; diff --git a/zephyr/CMakeLists.txt b/zephyr/CMakeLists.txt index a3090685d..1cb6308bd 100644 --- a/zephyr/CMakeLists.txt +++ b/zephyr/CMakeLists.txt @@ -903,6 +903,10 @@ zephyr_library_sources_ifdef(CONFIG_HAVE_AGENT ${SOF_LIB_PATH}/agent.c ) +zephyr_library_sources_ifdef(CONFIG_AMS + ${SOF_LIB_PATH}/ams.c +) + zephyr_library_sources_ifdef(CONFIG_GDB_DEBUG ${SOF_DEBUG_PATH}/gdb/gdb.c ${SOF_DEBUG_PATH}/gdb/ringbuffer.c diff --git a/zephyr/include/rtos/idc.h b/zephyr/include/rtos/idc.h index 3c8c7e072..e5e8f1f8a 100644 --- a/zephyr/include/rtos/idc.h +++ b/zephyr/include/rtos/idc.h @@ -96,6 +96,12 @@ #define IDC_MSG_SECONDARY_CORE_CRASHED IDC_TYPE(0xA) #define IDC_MSG_SECONDARY_CORE_CRASHED_EXT(x) IDC_EXTENSION(x) +/** \brief IDC process async msg */ +#define IDC_MSG_AMS IDC_TYPE(0xB) +#define IDC_MSG_AMS_EXT IDC_EXTENSION(0x0) + +#define IDC_HEADER_TO_AMS_SLOT_MASK(x) (x & 0xFFFF) + /** \brief IDC_MSG_SECONDARY_CORE_CRASHED header fields. 
*/ #define IDC_SCC_CORE_SHIFT 0 #define IDC_SCC_CORE_MASK 0xff diff --git a/zephyr/include/rtos/sof.h b/zephyr/include/rtos/sof.h index dfab4c8d2..b6447d90a 100644 --- a/zephyr/include/rtos/sof.h +++ b/zephyr/include/rtos/sof.h @@ -22,6 +22,7 @@ struct ipc; struct ll_schedule_domain; struct mm; struct mn; +struct ams_shared_context; struct notify_data; struct pm_runtime_data; struct sa; @@ -68,6 +69,11 @@ struct sof { /* runtime power management data */ struct pm_runtime_data *prd; +#ifdef CONFIG_AMS + /* asynchronous messaging service */ + struct ams_shared_context *ams_shared_ctx; +#endif + /* shared notifier data */ struct notify_data *notify_data; diff --git a/zephyr/wrapper.c b/zephyr/wrapper.c index b9ded92b8..c508b7c1f 100644 --- a/zephyr/wrapper.c +++ b/zephyr/wrapper.c @@ -120,6 +120,19 @@ void platform_interrupt_clear(uint32_t irq, uint32_t mask) } #endif +/* + * Asynchronous Messaging Service + * + * Use SOF async messaging service. + */ + +static struct async_message_service *host_ams[CONFIG_CORE_COUNT]; + +struct async_message_service **arch_ams_get(void) +{ + return host_ams + cpu_get_id(); +} + /* * Notifier. *