/*
 * Copyright (c) 2017 Oticon A/S
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * For all purposes, Zephyr threads see a CPU running at an infinitely high
 * clock.
 *
 * Therefore, the code will always run until completion after each interrupt,
 * after which arch_cpu_idle() will be called, releasing execution back to
 * the HW models.
 *
 * The HW models raising an interrupt will "awake the CPU" by calling
 * posix_interrupt_raised(), which will transfer control to the IRQ handler,
 * which runs in the SW/Zephyr context. After that, an arch_swap() to
 * whatever Zephyr thread may follow. Again, once Zephyr is done, control is
 * given back to the HW models.
 *
 * The Zephyr OS+APP code and the HW models are gated by a mutex +
 * condition, as there is no reason to let the Zephyr threads run while the
 * HW models run, or vice versa.
 *
*/
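
/*
 * To make the gating above concrete, a minimal illustrative sketch of such a
 * mutex + condition variable gate is shown below. All names are hypothetical;
 * the real implementation lives behind the nce_*() API and is not this code:
 *
 *   static pthread_mutex_t gate_mtx  = PTHREAD_MUTEX_INITIALIZER;
 *   static pthread_cond_t  gate_cond = PTHREAD_COND_INITIALIZER;
 *   static bool cpu_halted = true;
 *
 *   static void gate_change_state_and_wait(bool halted)
 *   {
 *       pthread_mutex_lock(&gate_mtx);
 *       cpu_halted = halted;                // hand control to the other side
 *       pthread_cond_broadcast(&gate_cond);
 *       while (cpu_halted == halted) {      // wait until it is handed back
 *           pthread_cond_wait(&gate_cond, &gate_mtx);
 *       }
 *       pthread_mutex_unlock(&gate_mtx);
 *   }
 */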

#include <zephyr/arch/posix/posix_soc_if.h>
#include "posix_soc.h"
#include "posix_board_if.h"
#include "posix_core.h"
#include "posix_arch_internal.h"
#include "kernel_internal.h"
#include "soc.h"
#include "nce_if.h"

static void *nce_st;

int posix_is_cpu_running(void)
{
	return nce_is_cpu_running(nce_st);
}

/**
 * Helper function which changes the state of the CPU (halted or running)
 * and waits until somebody else changes it to the opposite state.
 *
 * Both HW and SW threads use this function to transfer control to the
 * other side.
 *
 * This is how the idle thread halts the CPU and stays halted until the HW
 * models raise a new interrupt, and how the HW models awake the CPU and wait
 * for it to complete and go back to idle.
 */
void posix_change_cpu_state_and_wait(bool halted)
{
	if (halted) {
		nce_halt_cpu(nce_st);
	} else {
		nce_wake_cpu(nce_st);
	}
}

/**
 * HW models shall call this function to "awake the CPU"
 * when they are raising an interrupt.
 */
void posix_interrupt_raised(void)
{
	/* We change the CPU to the running state (we awake it), and block this
	 * thread until the CPU is halted again
	 */
	nce_wake_cpu(nce_st);
}
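
/*
 * For illustration only: a HW model thread raising interrupt "irq" would
 * typically mark it pending in its interrupt controller model and then awake
 * the CPU. The names below are hypothetical, not part of this SOC's API:
 *
 *   my_irq_ctrl_set_pending(irq);  // latch the interrupt in the HW model
 *   posix_interrupt_raised();      // awake the CPU; returns once it halts again
 */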

/**
 * Normally called from arch_cpu_idle():
 * the idle loop will call this function to set the CPU to "sleep".
 * Others may also call this function, with care. The CPU will be set to sleep
 * until some interrupt awakes it.
 * Interrupts should be enabled before calling.
 */
void posix_halt_cpu(void)
{
	/*
	 * We set the CPU in the halted state (this blocks this pthread
	 * until the CPU is awoken again by the HW models)
	 */
	nce_halt_cpu(nce_st);

	/* We are awoken; normally that means some interrupt has just come
	 * => let the "irq handler" check if/what interrupt was raised
	 * and call the appropriate irq handler.
	 *
	 * Note that the interrupt handling may trigger an arch_swap() to
	 * another Zephyr thread. When posix_irq_handler() returns, the Zephyr
	 * kernel has swapped back to this thread again.
	 */
	posix_irq_handler();

	/*
	 * And we go back to whatever Zephyr thread called us.
	 */
}
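
/*
 * Usage sketch (hedged; refer to the POSIX arch code for the actual
 * implementation): the arch idle hook is expected to wrap this function
 * roughly as follows:
 *
 *   void arch_cpu_idle(void)
 *   {
 *       posix_irq_full_unlock(); // interrupts must be enabled before halting
 *       posix_halt_cpu();        // block until the HW models raise an interrupt
 *   }
 */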

/**
 * Implementation of arch_cpu_atomic_idle() for this SOC
 */
void posix_atomic_halt_cpu(unsigned int imask)
{
	posix_irq_full_unlock();  /* Enable interrupts so the CPU can be awoken */
	posix_halt_cpu();         /* Sleep until some interrupt awakes the CPU */
	posix_irq_unlock(imask);  /* Restore the interrupt lock state */
}

/**
 * The HW models will call this function to "boot" the CPU
 * == spawn the Zephyr init thread, which will then spawn
 * anything it wants, and run until the CPU is set back to idle again
 */
void posix_boot_cpu(void)
{
	nce_st = nce_init();
	posix_arch_init();
	nce_boot_cpu(nce_st, z_cstart);
}
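
/*
 * Boot-flow sketch from the HW models' side (hedged; only posix_boot_cpu() is
 * real here, the other names are illustrative):
 *
 *   posix_boot_cpu();           // spawn the Zephyr init thread; returns once
 *                               // the CPU goes to idle for the first time
 *   while (!hwm_terminated) {
 *       hwm_run_next_event();   // advance HW models; raising an interrupt
 *   }                           // hands control back to Zephyr
 */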

/**
 * Clean up all memory allocated by the SOC and POSIX core
 *
 * This function can be called from both HW and SW threads
 */
void posix_soc_clean_up(void)
{
	nce_terminate(nce_st);
	posix_arch_clean_up();
	run_native_tasks(_NATIVE_ON_EXIT_LEVEL);
}