/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
|
|
|
|
|
/*
 * The purpose of this file is to provide essential/minimal kernel structure
 * definitions, so that they can be used without including kernel.h.
 *
 * The following rules must be observed:
 * 1. kernel_structs.h shall not depend on kernel.h both directly and
 *    indirectly (i.e. it shall not include any header files that include
 *    kernel.h in their dependency chain).
 * 2. kernel.h shall imply kernel_structs.h, such that it shall not be
 *    necessary to include kernel_structs.h explicitly when kernel.h is
 *    included.
 */
|
|
|
|
|
2018-09-14 06:06:35 +08:00
|
|
|
#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
|
|
|
|
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
|
2016-11-08 23:36:50 +08:00
|
|
|
|
2017-01-23 06:06:05 +08:00
|
|
|
#if !defined(_ASMLANGUAGE)
|
2020-04-04 01:01:03 +08:00
|
|
|
#include <sys/atomic.h>
|
2019-10-24 23:08:21 +08:00
|
|
|
#include <zephyr/types.h>
|
|
|
|
#include <sched_priq.h>
|
2019-06-26 22:33:41 +08:00
|
|
|
#include <sys/dlist.h>
|
2019-06-26 22:33:55 +08:00
|
|
|
#include <sys/util.h>
|
2020-04-04 01:01:03 +08:00
|
|
|
#include <sys/sys_heap.h>
|
2016-11-08 23:36:50 +08:00
|
|
|
#endif
|
|
|
|
|
2017-12-09 09:38:12 +08:00
|
|
|
/* Total number of scheduler priority levels: all cooperative levels plus
 * all preemptible levels, plus one extra (used for the idle thread).
 */
#define K_NUM_PRIORITIES \
	(CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES + 1)

/* Number of 32-bit words needed to hold one bit per priority (rounded up) */
#define K_NUM_PRIO_BITMAPS ((K_NUM_PRIORITIES + 31) >> 5)
|
|
|
|
|
2016-11-25 03:08:08 +08:00
|
|
|
/*
 * Bitmask definitions for the struct k_thread.thread_state field.
 *
 * Must be before kernel_arch_data.h because it might need them to be already
 * defined.
 */

/* states: common uses low bits, arch-specific use high bits */

/* Not a real thread */
#define _THREAD_DUMMY (BIT(0))

/* Thread is waiting on an object */
#define _THREAD_PENDING (BIT(1))

/* Thread has not yet started */
#define _THREAD_PRESTART (BIT(2))

/* Thread has terminated */
#define _THREAD_DEAD (BIT(3))

/* Thread is suspended */
#define _THREAD_SUSPENDED (BIT(4))

/* Thread is being aborted */
#define _THREAD_ABORTING (BIT(5))

/* Thread is present in the ready queue
 *
 * NOTE(review): BIT(6) is deliberately skipped here — confirm it is
 * reserved before reusing it for a new state.
 */
#define _THREAD_QUEUED (BIT(7))

/* end - states */
|
|
|
|
|
2017-05-12 04:29:15 +08:00
|
|
|
#ifdef CONFIG_STACK_SENTINEL
|
|
|
|
/* Magic value in lowest bytes of the stack */
|
|
|
|
#define STACK_SENTINEL 0xF0F0F0F0
|
|
|
|
#endif
|
2016-12-22 04:38:54 +08:00
|
|
|
|
2016-12-22 05:00:35 +08:00
|
|
|
/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
#define _NON_PREEMPT_THRESHOLD 0x0080U

/* highest value of _thread_base.preempt at which a thread is preemptible */
#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1U)
2016-11-25 03:08:08 +08:00
|
|
|
|
2016-11-08 23:36:50 +08:00
|
|
|
#if !defined(_ASMLANGUAGE)
|
|
|
|
|
|
|
|
/* Scheduler ready queue; the run queue representation is selected at build
 * time by the CONFIG_SCHED_* choice.
 */
struct _ready_q {
#ifndef CONFIG_SMP
	/* always contains next thread to run: cannot be NULL */
	struct k_thread *cache;
#endif

#if defined(CONFIG_SCHED_DUMB)
	/* simple doubly-linked list of ready threads */
	sys_dlist_t runq;
#elif defined(CONFIG_SCHED_SCALABLE)
	/* red/black tree of ready threads */
	struct _priq_rb runq;
#elif defined(CONFIG_SCHED_MULTIQ)
	/* per-priority multi-queue of ready threads */
	struct _priq_mq runq;
#endif
};
|
|
|
|
|
/*
 * Design note on the ready-thread cache: the cache is filled aggressively,
 * i.e. the next thread to cache is found even when the currently cached
 * thread is context-switched out. This keeps the cache "hot" so that
 * interrupt exit code can always fetch the next thread to run with a simple
 * memory read of _kernel.ready_q.cache, instead of calling into more complex
 * C scheduler code, which significantly improves ISR-to-thread switch
 * latency on the slow path.
 */
|
|
|
typedef struct _ready_q _ready_q_t; /* convenience alias */
|
|
|
|
|
2018-01-26 08:39:35 +08:00
|
|
|
/* Per-CPU kernel state record */
struct _cpu {
	/* nested interrupt count */
	uint32_t nested;

	/* interrupt stack pointer base */
	char *irq_stack;

	/* currently scheduled thread */
	struct k_thread *current;

	/* one assigned idle thread per CPU */
	struct k_thread *idle_thread;

	/* If non-null, self-aborted thread that needs cleanup */
	struct k_thread *pending_abort;

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
	/* Coop thread preempted by current metairq, or NULL */
	struct k_thread *metairq_preempted;
#endif

#ifdef CONFIG_TIMESLICING
	/* number of ticks remaining in current time slice */
	int slice_ticks;
#endif

	/* this CPU's index */
	uint8_t id;

#ifdef CONFIG_SMP
	/* True when _current is allowed to context switch */
	uint8_t swap_ok;
#endif
};
|
|
|
|
|
|
|
|
typedef struct _cpu _cpu_t;
|
|
|
|
|
2018-11-02 08:50:02 +08:00
|
|
|
/* Global kernel state; instantiated once as the _kernel variable below */
struct z_kernel {
	/* per-CPU state records, one per configured CPU */
	struct _cpu cpus[CONFIG_MP_NUM_CPUS];

#ifdef CONFIG_SYS_CLOCK_EXISTS
	/* queue of timeouts */
	sys_dlist_t timeout_q;
#endif

#ifdef CONFIG_PM
	int32_t idle; /* Number of ticks for kernel idling */
#endif

	/*
	 * ready queue: can be big, keep after small fields, since some
	 * assembly (e.g. ARC) are limited in the encoding of the offset
	 */
	struct _ready_q ready_q;

#ifdef CONFIG_FPU_SHARING
	/*
	 * A 'current_sse' field does not exist in addition to the 'current_fp'
	 * field since it's not possible to divide the IA-32 non-integer
	 * registers into 2 distinct blocks owned by differing threads. In
	 * other words, given that the 'fxnsave/fxrstor' instructions
	 * save/restore both the X87 FPU and XMM registers, it's not possible
	 * for a thread to only "own" the XMM registers.
	 */

	/* thread that owns the FP regs */
	struct k_thread *current_fp;
#endif

#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *threads; /* singly linked list of ALL threads */
#endif
};
|
|
|
|
|
2018-11-02 08:50:02 +08:00
|
|
|
typedef struct z_kernel _kernel_t; /* convenience alias */

/* the single global kernel state instance */
extern struct z_kernel _kernel;
|
2016-11-08 23:36:50 +08:00
|
|
|
|
2018-01-26 08:39:35 +08:00
|
|
|
#ifdef CONFIG_SMP

/* True if the current context can be preempted and migrated to
 * another SMP CPU.
 */
bool z_smp_cpu_mobile(void);

/* Current CPU record; asserts the context cannot migrate while the
 * pointer is in use.
 */
#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
			arch_curr_cpu(); })
/* Currently executing thread */
#define _current k_current_get()

#else
/* Uniprocessor: CPU 0 is the only CPU */
#define _current_cpu (&_kernel.cpus[0])
#define _current _kernel.cpus[0].current
#endif

/* shorthand for the kernel timeout queue */
#define _timeout_q _kernel.timeout_q
|
|
|
|
|
2020-04-01 00:17:09 +08:00
|
|
|
/* kernel wait queue record: holds threads pended on a kernel object.
 * Representation is selected at build time by CONFIG_WAITQ_SCALABLE.
 */

#ifdef CONFIG_WAITQ_SCALABLE

typedef struct {
	/* red/black tree of pended threads */
	struct _priq_rb waitq;
} _wait_q_t;

/* comparison callback ordering the wait queue's rbtree nodes */
extern bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

#define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }

#else

typedef struct {
	/* doubly-linked list of pended threads */
	sys_dlist_t waitq;
} _wait_q_t;

#define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }

#endif
|
|
|
|
|
|
|
|
/* kernel timeout record */

struct _timeout;
/* expiry callback invoked when a timeout fires */
typedef void (*_timeout_func_t)(struct _timeout *t);

struct _timeout {
	/* linkage into the kernel timeout queue */
	sys_dnode_t node;
	/* function called on expiry */
	_timeout_func_t fn;
#ifdef CONFIG_TIMEOUT_64BIT
	/* Can't use k_ticks_t for header dependency reasons */
	int64_t dticks;
#else
	int32_t dticks;
#endif
};
|
|
|
|
|
2016-11-08 23:36:50 +08:00
|
|
|
#endif /* _ASMLANGUAGE */
|
|
|
|
|
2018-09-14 06:06:35 +08:00
|
|
|
#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_ */
|