/*
 * Copyright (c) 2017 Linaro Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <zephyr/sys/__assert.h>
#include <stdbool.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/check.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/logging/log.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

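/* Serializes all updates to memory domains and their thread membership
 * lists.
 */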
struct k_spinlock z_mem_domain_lock;

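/* Architecture-defined limit on partitions per domain, latched at boot
 * by init_mem_domain_module().
 */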
static uint8_t max_partitions;

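/* The default memory domain, set up at boot; the libc partition is added
 * to it when one exists.
 */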
struct k_mem_domain k_mem_domain_default;

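/*
 * Validate a partition before it is added to a domain: it must be
 * non-NULL, respect W^X when CONFIG_EXECUTE_XOR_WRITE is enabled, have a
 * nonzero size, not wrap around the address space, and not overlap any
 * partition already present in the domain.
 */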
static bool check_add_partition(struct k_mem_domain *domain,
				struct k_mem_partition *part)
{
	int i;
	uintptr_t pstart, pend, dstart, dend;

	if (part == NULL) {
		LOG_ERR("NULL k_mem_partition provided");
		return false;
	}

#ifdef CONFIG_EXECUTE_XOR_WRITE
	/* Arches where execution cannot be disabled should always return
	 * false to this check
	 */
	if (K_MEM_PARTITION_IS_EXECUTABLE(part->attr) &&
	    K_MEM_PARTITION_IS_WRITABLE(part->attr)) {
		LOG_ERR("partition is writable and executable <start %lx>",
			part->start);
		return false;
	}
#endif /* CONFIG_EXECUTE_XOR_WRITE */

	if (part->size == 0U) {
		LOG_ERR("zero sized partition at %p with base 0x%lx",
			part, part->start);
		return false;
	}

	pstart = part->start;
	pend = part->start + part->size;

	if (pend <= pstart) {
		LOG_ERR("invalid partition %p, wraparound detected. base 0x%lx size %zu",
			part, part->start, part->size);
		return false;
	}

	/* Check that this partition doesn't overlap any existing ones already
	 * in the domain
	 */
	for (i = 0; i < domain->num_partitions; i++) {
		struct k_mem_partition *dpart = &domain->partitions[i];

		if (dpart->size == 0U) {
			/* Unused slot */
			continue;
		}

		dstart = dpart->start;
		dend = dstart + dpart->size;

		if (pend > dstart && dend > pstart) {
			LOG_ERR("partition %p base %lx (size %zu) overlaps existing base %lx (size %zu)",
				part, part->start, part->size,
				dpart->start, dpart->size);
			return false;
		}
	}

	return true;
}

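/*
 * Initialize a memory domain with an optional initial set of partitions.
 * Returns 0 on success, -EINVAL on bad arguments or partitions, and
 * -ENOMEM if architecture-specific initialization fails.
 */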
int k_mem_domain_init(struct k_mem_domain *domain, uint8_t num_parts,
		      struct k_mem_partition *parts[])
{
	k_spinlock_key_t key;
	int ret = 0;

	CHECKIF(domain == NULL) {
		ret = -EINVAL;
		goto out;
	}

	CHECKIF(!(num_parts == 0U || parts != NULL)) {
		LOG_ERR("parts array is NULL and num_parts is nonzero");
		ret = -EINVAL;
		goto out;
	}

	CHECKIF(!(num_parts <= max_partitions)) {
		LOG_ERR("num_parts of %d exceeds maximum allowable partitions (%d)",
			num_parts, max_partitions);
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&z_mem_domain_lock);

	domain->num_partitions = 0U;
	(void)memset(domain->partitions, 0, sizeof(domain->partitions));
	sys_dlist_init(&domain->mem_domain_q);

#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
	ret = arch_mem_domain_init(domain);

	if (ret != 0) {
		LOG_ERR("architecture-specific initialization failed for domain %p with %d",
			domain, ret);
		ret = -ENOMEM;
		goto unlock_out;
	}
#endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */
	if (num_parts != 0U) {
		uint32_t i;

		for (i = 0U; i < num_parts; i++) {
			CHECKIF(!check_add_partition(domain, parts[i])) {
				LOG_ERR("invalid partition index %d (%p)",
					i, parts[i]);
				ret = -EINVAL;
				goto unlock_out;
			}

			domain->partitions[i] = *parts[i];
			domain->num_partitions++;
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
			int ret2 = arch_mem_domain_partition_add(domain, i);

			ARG_UNUSED(ret2);
			CHECKIF(ret2 != 0) {
				ret = ret2;
			}
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
		}
	}

unlock_out:
	k_spin_unlock(&z_mem_domain_lock, key);

out:
	return ret;
}

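/*
 * Add a partition to an initialized domain. Returns 0 on success,
 * -EINVAL on a bad domain or partition, -ENOSPC if all of the domain's
 * partition slots are in use, or an architecture-specific error.
 */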
int k_mem_domain_add_partition(struct k_mem_domain *domain,
			       struct k_mem_partition *part)
{
	int p_idx;
	k_spinlock_key_t key;
	int ret = 0;

	CHECKIF(domain == NULL) {
		ret = -EINVAL;
		goto out;
	}

	CHECKIF(!check_add_partition(domain, part)) {
		LOG_ERR("invalid partition %p", part);
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&z_mem_domain_lock);

	for (p_idx = 0; p_idx < max_partitions; p_idx++) {
		/* A zero-sized partition denotes it's a free partition */
		if (domain->partitions[p_idx].size == 0U) {
			break;
		}
	}

	CHECKIF(!(p_idx < max_partitions)) {
		LOG_ERR("no free partition slots available");
		ret = -ENOSPC;
		goto unlock_out;
	}

	LOG_DBG("add partition base %lx size %zu to domain %p\n",
		part->start, part->size, domain);

	domain->partitions[p_idx].start = part->start;
	domain->partitions[p_idx].size = part->size;
	domain->partitions[p_idx].attr = part->attr;

	domain->num_partitions++;

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
	ret = arch_mem_domain_partition_add(domain, p_idx);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */

unlock_out:
	k_spin_unlock(&z_mem_domain_lock, key);

out:
	return ret;
}

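/*
 * Remove the partition matching the given base address and size from a
 * domain. Returns 0 on success, -EINVAL on bad arguments, -ENOENT if no
 * partition in the domain matches, or an architecture-specific error.
 */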
int k_mem_domain_remove_partition(struct k_mem_domain *domain,
				  struct k_mem_partition *part)
{
	int p_idx;
	k_spinlock_key_t key;
	int ret = 0;

	CHECKIF((domain == NULL) || (part == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&z_mem_domain_lock);

	/* find a partition that matches the given start and size */
	for (p_idx = 0; p_idx < max_partitions; p_idx++) {
		if (domain->partitions[p_idx].start == part->start &&
		    domain->partitions[p_idx].size == part->size) {
			break;
		}
	}

	CHECKIF(!(p_idx < max_partitions)) {
		LOG_ERR("no matching partition found");
		ret = -ENOENT;
		goto unlock_out;
	}

	LOG_DBG("remove partition base %lx size %zu from domain %p\n",
		part->start, part->size, domain);

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
	ret = arch_mem_domain_partition_remove(domain, p_idx);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */

	/* A zero-sized partition denotes it's a free partition */
	domain->partitions[p_idx].size = 0U;

	domain->num_partitions--;

unlock_out:
	k_spin_unlock(&z_mem_domain_lock, key);

out:
	return ret;
}

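/* Add a thread to a domain's membership list. z_mem_domain_lock must be
 * held by the caller.
 */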
static int add_thread_locked(struct k_mem_domain *domain,
			     k_tid_t thread)
{
	int ret = 0;

	__ASSERT_NO_MSG(domain != NULL);
	__ASSERT_NO_MSG(thread != NULL);

	LOG_DBG("add thread %p to domain %p\n", thread, domain);
	sys_dlist_append(&domain->mem_domain_q,
			 &thread->mem_domain_info.mem_domain_q_node);
	thread->mem_domain_info.mem_domain = domain;

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
	ret = arch_mem_domain_thread_add(thread);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */

	return ret;
}

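/* Remove a thread from its current domain's membership list.
 * z_mem_domain_lock must be held by the caller.
 */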
static int remove_thread_locked(struct k_thread *thread)
{
	int ret = 0;

	__ASSERT_NO_MSG(thread != NULL);
	LOG_DBG("remove thread %p from memory domain %p\n",
		thread, thread->mem_domain_info.mem_domain);
	sys_dlist_remove(&thread->mem_domain_info.mem_domain_q_node);

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
	ret = arch_mem_domain_thread_remove(thread);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */

	return ret;
}

/* Called from thread object initialization */
void z_mem_domain_init_thread(struct k_thread *thread)
{
	int ret;
	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

	/* New threads inherit memory domain configuration from parent */
	ret = add_thread_locked(_current->mem_domain_info.mem_domain, thread);
	__ASSERT_NO_MSG(ret == 0);
	ARG_UNUSED(ret);

	k_spin_unlock(&z_mem_domain_lock, key);
}

/* Called when thread aborts during teardown tasks. _sched_spinlock is held */
void z_mem_domain_exit_thread(struct k_thread *thread)
{
	int ret;

	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

	ret = remove_thread_locked(thread);
	__ASSERT_NO_MSG(ret == 0);
	ARG_UNUSED(ret);

	k_spin_unlock(&z_mem_domain_lock, key);
}

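/*
 * Move a thread to a different memory domain. Returns 0 on success (or
 * if the thread is already a member of the domain), otherwise an
 * architecture-specific error from the remove/add hooks.
 */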
int k_mem_domain_add_thread(struct k_mem_domain *domain, k_tid_t thread)
{
	int ret = 0;
	k_spinlock_key_t key;

	key = k_spin_lock(&z_mem_domain_lock);
	if (thread->mem_domain_info.mem_domain != domain) {
		ret = remove_thread_locked(thread);

		if (ret == 0) {
			ret = add_thread_locked(domain, thread);
		}
	}
	k_spin_unlock(&z_mem_domain_lock, key);

	return ret;
}

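/*
 * A minimal usage sketch of the API above (illustrative only, not part
 * of this file): give one thread access to a buffer through a dedicated
 * domain. It assumes the K_MEM_PARTITION_DEFINE() helper and an
 * arch-provided K_MEM_PARTITION_P_RW_U_RW attribute; buffer alignment
 * requirements are architecture-specific, and app_buf, app_part,
 * app_domain and setup() are hypothetical names.
 *
 *	uint8_t __aligned(4096) app_buf[4096];
 *	K_MEM_PARTITION_DEFINE(app_part, app_buf, sizeof(app_buf),
 *			       K_MEM_PARTITION_P_RW_U_RW);
 *
 *	struct k_mem_domain app_domain;
 *	struct k_mem_partition *app_parts[] = { &app_part };
 *
 *	int setup(k_tid_t thread)
 *	{
 *		int ret;
 *
 *		ret = k_mem_domain_init(&app_domain, ARRAY_SIZE(app_parts),
 *					app_parts);
 *		if (ret != 0) {
 *			return ret;
 *		}
 *
 *		return k_mem_domain_add_thread(&app_domain, thread);
 *	}
 */
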
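/*
 * Boot-time setup: latch the architecture's partition limit and
 * initialize the default memory domain, adding the libc partition to it
 * when one exists.
 */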
static int init_mem_domain_module(void)
{
	int ret;

	ARG_UNUSED(ret);

	max_partitions = arch_mem_domain_max_partitions_get();
	/*
	 * max_partitions must be less than or equal to
	 * CONFIG_MAX_DOMAIN_PARTITIONS, otherwise we would index the
	 * domain partition arrays out of bounds.
	 */
	__ASSERT(max_partitions <= CONFIG_MAX_DOMAIN_PARTITIONS, "");

	ret = k_mem_domain_init(&k_mem_domain_default, 0, NULL);
	__ASSERT(ret == 0, "failed to init default mem domain");

#ifdef Z_LIBC_PARTITION_EXISTS
	ret = k_mem_domain_add_partition(&k_mem_domain_default,
					 &z_libc_partition);
	__ASSERT(ret == 0, "failed to add default libc mem partition");
#endif /* Z_LIBC_PARTITION_EXISTS */

	return 0;
}

SYS_INIT(init_mem_domain_module, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);