2015-06-19 22:32:52 +08:00
|
|
|
/* k_server.c - microkernel server */
|
2015-04-11 07:44:37 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Copyright (c) 2010, 2012-2015 Wind River Systems, Inc.
|
|
|
|
*
|
2015-10-07 00:00:37 +08:00
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
2015-04-11 07:44:37 +08:00
|
|
|
*
|
2015-10-07 00:00:37 +08:00
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
2015-04-11 07:44:37 +08:00
|
|
|
*
|
2015-10-07 00:00:37 +08:00
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
2015-04-11 07:44:37 +08:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
2015-10-21 00:42:33 +08:00
|
|
|
* DESCRIPTION
|
|
|
|
* This module implements the microkernel server, which processes service
|
|
|
|
* requests from tasks (and, less commonly, fibers and ISRs). The requests are
|
|
|
|
 * serviced by a high priority fiber, thereby ensuring that requests are
|
|
|
|
* processed in a timely manner and in a single threaded manner that prevents
|
|
|
|
* simultaneous requests from interfering with each other.
|
2015-07-02 05:22:39 +08:00
|
|
|
*/
|
2015-04-11 07:44:37 +08:00
|
|
|
|
|
|
|
#include <toolchain.h>
|
|
|
|
#include <sections.h>
|
2015-06-19 22:56:52 +08:00
|
|
|
#include <micro_private.h>
|
2015-06-19 23:07:02 +08:00
|
|
|
#include <nano_private.h>
|
2015-04-11 07:44:37 +08:00
|
|
|
#include <microkernel.h>
|
|
|
|
#include <nanokernel.h>
|
|
|
|
#include <misc/__assert.h>
|
|
|
|
#include <drivers/system_timer.h>
|
|
|
|
|
2015-04-27 23:28:16 +08:00
|
|
|
extern const kernelfunc _k_server_dispatch_table[];
|
2015-04-11 07:44:37 +08:00
|
|
|
|
2015-07-02 05:22:39 +08:00
|
|
|
/**
|
|
|
|
*
|
2015-07-02 05:51:40 +08:00
|
|
|
* @brief Select task to be executed by microkernel
|
2015-07-02 05:22:39 +08:00
|
|
|
*
|
|
|
|
* Locates that highest priority task queue that is non-empty and chooses the
|
|
|
|
* task at the head of that queue. It's guaranteed that there will always be
|
|
|
|
* a non-empty queue, since the idle task is always executable.
|
|
|
|
*
|
2015-07-02 05:29:04 +08:00
|
|
|
* @return pointer to selected task
|
2015-07-02 05:22:39 +08:00
|
|
|
*/
|
2015-08-18 04:27:31 +08:00
|
|
|
static struct k_task *next_task_select(void)
|
2015-04-25 04:34:53 +08:00
|
|
|
{
|
|
|
|
int K_PrioListIdx;
|
|
|
|
|
|
|
|
#if (CONFIG_NUM_TASK_PRIORITIES <= 32)
|
2015-08-15 05:16:16 +08:00
|
|
|
K_PrioListIdx = find_lsb_set(_k_task_priority_bitmap[0]) - 1;
|
2015-04-25 04:34:53 +08:00
|
|
|
#else
|
|
|
|
int bit_map;
|
|
|
|
int set_bit_pos;
|
|
|
|
|
|
|
|
K_PrioListIdx = -1;
|
|
|
|
for (bit_map = 0; ; bit_map++) {
|
2015-08-15 05:16:16 +08:00
|
|
|
set_bit_pos = find_lsb_set(_k_task_priority_bitmap[bit_map]);
|
2015-04-25 04:34:53 +08:00
|
|
|
if (set_bit_pos) {
|
|
|
|
K_PrioListIdx += set_bit_pos;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
K_PrioListIdx += 32;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2015-08-25 04:45:34 +08:00
|
|
|
return _k_task_priority_list[K_PrioListIdx].head;
|
2015-04-25 04:34:53 +08:00
|
|
|
}
|
|
|
|
|
2015-07-02 05:22:39 +08:00
|
|
|
/**
 *
 * @brief The microkernel thread entry point
 *
 * This function implements the microkernel fiber. It waits for command
 * packets to arrive on its command stack. It executes all commands on the
 * stack and then sets up the next task that is ready to run. Next it
 * goes to wait on further inputs on the command stack.
 *
 * @param unused1 unused; required to match fiber entry point signature
 * @param unused2 unused; required to match fiber entry point signature
 *
 * @return Does not return.
 */
FUNC_NORETURN void _k_server(int unused1, int unused2)
{
	struct k_args *pArgs;      /* current command packet (or encoded event) */
	struct k_task *pNextTask;  /* task chosen to run after commands drain */

	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);

	/* indicate that failure of this fiber may be fatal to the entire system
	 */

	_nanokernel.current->flags |= ESSENTIAL;

	while (1) { /* forever */
		/* block until at least one command is pushed onto the stack */
		pArgs = (struct k_args *)nano_fiber_stack_pop_wait(
			&_k_command_stack); /* will schedule */
		do {
			/*
			 * stack entries are tagged: low bit(s) distinguish a
			 * pointer to a command packet from an encoded event
			 */
			int cmd_type = (int)pArgs & KERNEL_CMD_TYPE_MASK;

			if (cmd_type == KERNEL_CMD_PACKET_TYPE) {

				/* process command packet */

#ifdef CONFIG_TASK_MONITOR
				if (_k_monitor_mask & MON_KSERV) {
					_k_task_monitor_args(pArgs);
				}
#endif
				/* invoke the handler stored in the packet */
				(*pArgs->Comm)(pArgs);
			} else {

				/* cmd_type == KERNEL_CMD_EVENT_TYPE */

#ifdef CONFIG_TASK_MONITOR
				if (_k_monitor_mask & MON_EVENT) {
					_k_task_monitor_args(pArgs);
				}
#endif
				/* strip the type tag to recover the event id */
				kevent_t event = (int)pArgs & ~KERNEL_CMD_TYPE_MASK;

				_k_do_event_signal(event);
			}

			/*
			 * check if another fiber (of equal or greater priority)
			 * needs to run
			 */

			if (_nanokernel.fiber) {
				fiber_yield();
			}
			/* drain any further queued commands without blocking */
		} while (nano_fiber_stack_pop(&_k_command_stack, (void *)&pArgs));

		/* all commands processed: pick the highest priority ready task */
		pNextTask = next_task_select();

		if (_k_current_task != pNextTask) {
			/*
			 * switch from currently selected task to a different
			 * one
			 */

#ifdef CONFIG_WORKLOAD_MONITOR
			/* id 0 appears to denote the idle task -- TODO confirm */
			if (pNextTask->id == 0x00000000) {
				_k_workload_monitor_idle_start();
			} else if (_k_current_task->id == 0x00000000) {
				_k_workload_monitor_idle_end();
			}
#endif

			/* publish the new current task to the nanokernel */
			_k_current_task = pNextTask;
			_nanokernel.task = (struct tcs *)pNextTask->workspace;

#ifdef CONFIG_TASK_MONITOR
			if (_k_monitor_mask & MON_TSWAP) {
				_k_task_monitor(_k_current_task, 0);
			}
#endif
		}
	}

	/*
	 * Code analyzers may complain that _k_server() uses an infinite loop
	 * unless we indicate that this is intentional
	 */

	CODE_UNREACHABLE;
}
|