/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARM Cortex-M exception/interrupt exit API
*
*
* Provides functions for performing kernel handling when exiting exceptions or
* interrupts that are installed directly in the vector table (i.e. that are not
* wrapped around by _isr_wrapper()).
*/
#include <kernel_structs.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <arch/cpu.h>
_ASM_FILE_PROLOGUE
GTEXT(_ExcExit)
GTEXT(_IntExit)
GDATA(_kernel)
#ifdef CONFIG_TIMESLICING
GTEXT(_update_time_slice_before_swap)
#endif
/**
*
* @brief Kernel housekeeping when exiting interrupt handler installed
* directly in vector table
*
* Kernel allows installing interrupt handlers (ISRs) directly into the vector
* table to get the lowest interrupt latency possible. This allows the ISR to be
* invoked directly without going through a software interrupt table. However,
* upon exiting the ISR, some kernel work must still be performed, namely
* possible context switching. While ISRs connected in the software interrupt
* table do this automatically via a wrapper, ISRs connected directly in the
* vector table must invoke _IntExit() as the *very last* action before
* returning.
*
* e.g.
*
* void myISR(void)
* {
* printk("in %s\n", __FUNCTION__);
* doStuff();
* _IntExit();
* }
*
* @return N/A
*/
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
/* _IntExit falls through to _ExcExit (they are aliases of each other) */
/**
*
* @brief Kernel housekeeping when exiting exception handler installed
* directly in vector table
*
* See _IntExit().
*
* @return N/A
*/
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
#ifdef CONFIG_PREEMPT_ENABLED
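	/*
	 * A context switch is only needed when the interrupted thread is no
	 * longer the next thread to run: compare _kernel.current against the
	 * cached highest-priority ready thread (_kernel.ready_q.cache).
	 */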
	ldr r0, =_kernel
	ldr r1, [r0, #_kernel_offset_to_current]
	ldr r0, [r0, #_kernel_offset_to_ready_q_cache]
	cmp r0, r1
	beq _EXIT_EXC
#ifdef CONFIG_TIMESLICING
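	/* Preserve lr: it holds EXC_RETURN and is clobbered by the bl below */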
	push {lr}
	bl _update_time_slice_before_swap
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
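	/* Baseline Thumb cannot pop directly into lr; go through r0 */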
	pop {r0}
	mov lr, r0
#else
	pop {lr}
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_TIMESLICING */

	/* context switch required, pend the PendSV exception */
	ldr r1, =_SCS_ICSR
	ldr r2, =_SCS_ICSR_PENDSV
	str r2, [r1]
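	/* The switch itself is carried out by the PendSV handler once this
	 * exception (and any other active exception) has returned.
	 */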
_ExcExitWithGdbStub:
_EXIT_EXC:
#endif /* CONFIG_PREEMPT_ENABLED */
#ifdef CONFIG_STACK_SENTINEL
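	/* Verify the interrupted thread's stack sentinel before returning,
	 * again preserving lr (EXC_RETURN) across the C call.
	 */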
	push {lr}
	bl _check_stack_sentinel
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
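	/* Baseline Thumb cannot pop directly into lr; go through r0 */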
	pop {r0}
	mov lr, r0
#else
	pop {lr}
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_STACK_SENTINEL */
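	/* lr still holds EXC_RETURN: branching to it triggers the exception
	 * return, restoring the remaining context from the stacked frame.
	 */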
	bx lr