/* wait queue for multiple fibers on nanokernel objects */

/*
 * Copyright (c) 2015 Wind River Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _kernel_nanokernel_include_wait_q__h_
#define _kernel_nanokernel_include_wait_q__h_

#include <nano_private.h>

#ifdef __cplusplus
extern "C" {
#endif

/* reset a wait queue, call during operation */
static inline void _nano_wait_q_reset(struct _nano_queue *wait_q)
{
	wait_q->head = (void *)0;
	wait_q->tail = (void *)&(wait_q->head);
}
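
/*
 * Note on the empty-queue representation (informational): after a reset,
 *
 *	wait_q->head == NULL
 *	wait_q->tail == &wait_q->head
 *
 * so _nano_wait_q_put() below needs no empty-queue special case: the cast
 * there relies on 'link' being the first field of struct tcs, so the write
 * through ((struct tcs *)wait_q->tail)->link lands in wait_q->head.
 */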

/* initialize a wait queue: call only during object initialization */
static inline void _nano_wait_q_init(struct _nano_queue *wait_q)
{
	_nano_wait_q_reset(wait_q);
}

/*
 * Remove first fiber from a wait queue and put it on the ready queue, knowing
 * that the wait queue is not empty.
 */
static inline
struct tcs *_nano_wait_q_remove_no_check(struct _nano_queue *wait_q)
{
	struct tcs *tcs = wait_q->head;

	/* if head == tail, the queue contained exactly one fiber */
	if (wait_q->tail == wait_q->head) {
		_nano_wait_q_reset(wait_q);
	} else {
		wait_q->head = tcs->link;
	}
	tcs->link = 0;

	_nano_fiber_ready(tcs);
	return tcs;
}

/*
 * Remove first fiber from a wait queue and put it on the ready queue.
 * Abort and return NULL if the wait queue is empty.
 */
static inline struct tcs *_nano_wait_q_remove(struct _nano_queue *wait_q)
{
	return wait_q->head ? _nano_wait_q_remove_no_check(wait_q) : NULL;
}
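
/*
 * Waker-side sketch (illustrative only, not part of this header): when an
 * object is signaled, the first waiting fiber, if any, is readied and is
 * typically handed a return value via fiberRtnValueSet() from
 * nano_private.h. 'obj', 'data' and 'data_available' are hypothetical names.
 *
 *	unsigned int imask = irq_lock();
 *	struct tcs *tcs = _nano_wait_q_remove(&obj->wait_q);
 *
 *	if (tcs) {
 *		_nano_timeout_abort(tcs);
 *		fiberRtnValueSet(tcs, (unsigned int)data);
 *	} else {
 *		obj->data_available = 1;
 *	}
 *	irq_unlock(imask);
 */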

/* put current fiber on specified wait queue */
static inline void _nano_wait_q_put(struct _nano_queue *wait_q)
{
	((struct tcs *)wait_q->tail)->link = _nanokernel.current;
	wait_q->tail = _nanokernel.current;
}
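
/*
 * Waiter-side sketch (illustrative only): a blocking nanokernel API queues
 * the current fiber with interrupts locked, then swaps out via the
 * arch-provided _Swap(); the fiber resumes at _Swap()'s return once a waker
 * readies it. 'obj', 'data' and 'data_available' are hypothetical names.
 *
 *	unsigned int imask = irq_lock();
 *
 *	if (!obj->data_available) {
 *		_nano_wait_q_put(&obj->wait_q);
 *		data = (void *)_Swap(imask);
 *	} else {
 *		obj->data_available = 0;
 *		irq_unlock(imask);
 *	}
 */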

#ifdef CONFIG_NANO_TIMEOUTS
/*
 * Remove a fiber from the wait queue it is blocked on, e.g. when its timeout
 * expires before the object is signaled.
 */
static inline void _nano_timeout_remove_tcs_from_wait_q(struct tcs *tcs)
{
	struct _nano_queue *wait_q = tcs->nano_timeout.wait_q;

	if (wait_q->head == tcs) {
		/* tcs is first in line: dequeue it from the head */
		if (wait_q->tail == wait_q->head) {
			_nano_wait_q_reset(wait_q);
		} else {
			wait_q->head = tcs->link;
		}
	} else {
		/* walk the singly-linked queue to the fiber before tcs */
		struct tcs *prev = wait_q->head;

		while (prev->link != tcs) {
			prev = prev->link;
		}
		prev->link = tcs->link;
		if (wait_q->tail == tcs) {
			wait_q->tail = prev;
		}
	}
}

#include <timeout_q.h>

	#define _NANO_TIMEOUT_TICK_GET() sys_tick_get()

	#define _NANO_TIMEOUT_ADD(pq, ticks) \
		do { \
			if ((ticks) != TICKS_UNLIMITED) { \
				_nano_timeout_add(_nanokernel.current, (pq), (ticks)); \
			} \
		} while (0)
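
/*
 * Timeout sketch (illustrative only): a "wait with timeout" API arms a nano
 * timeout for the current fiber before swapping out, so that an expiry can
 * unlink it from the wait queue (see _nano_timeout_remove_tcs_from_wait_q()
 * above). This assumes the macro's first argument is the wait queue being
 * blocked on; 'obj' and 'timeout_in_ticks' are hypothetical names.
 *
 *	_NANO_TIMEOUT_ADD(&obj->wait_q, timeout_in_ticks);
 *	_nano_wait_q_put(&obj->wait_q);
 *	data = (void *)_Swap(imask);
 */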

	#define _NANO_TIMEOUT_SET_TASK_TIMEOUT(ticks) \
		_nanokernel.task_timeout = (ticks)
#else
	#define _nano_timeout_tcs_init(tcs) do { } while ((0))
	#define _nano_timeout_abort(tcs) do { } while ((0))
	#define _nano_get_earliest_timeouts_deadline() ((uint32_t)TICKS_UNLIMITED)

	#define _NANO_TIMEOUT_TICK_GET() 0
	#define _NANO_TIMEOUT_ADD(pq, ticks) do { } while (0)
	#define _NANO_TIMEOUT_SET_TASK_TIMEOUT(ticks) do { } while ((0))
#endif /* CONFIG_NANO_TIMEOUTS */

#ifdef __cplusplus
}
#endif

#endif /* _kernel_nanokernel_include_wait_q__h_ */