/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _offsets_short__h_
#define _offsets_short__h_

#include <offsets.h>
#include <offsets_short_arch.h>

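/*
 * Each macro below resolves to the byte offset of a field within
 * struct _kernel or struct k_thread. The ___*_OFFSET symbols come from
 * the generated <offsets.h> (and <offsets_short_arch.h> for
 * architecture-specific fields), so assembly code can reach C struct
 * members without hard-coding the struct layouts.
 *
 * A minimal usage sketch (hypothetical ARM assembly; the register
 * choice is an assumption, not taken from this file):
 *
 *     ldr r0, =_kernel
 *     ldr r1, [r0, #_kernel_offset_to_current]   @ r1 = _kernel.current
 */
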
/* kernel */

/* main */

#define _kernel_offset_to_nested \
	(___kernel_t_nested_OFFSET)

#define _kernel_offset_to_irq_stack \
	(___kernel_t_irq_stack_OFFSET)

#define _kernel_offset_to_current \
	(___kernel_t_current_OFFSET)

#define _kernel_offset_to_idle \
	(___kernel_t_idle_OFFSET)

#define _kernel_offset_to_current_fp \
	(___kernel_t_current_fp_OFFSET)

#define _kernel_offset_to_ready_q_cache \
	(___kernel_t_ready_q_OFFSET + ___ready_q_t_cache_OFFSET)

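/*
 * The ready queue cache holds the next thread to run, so interrupt exit
 * code can pick it up with a single load from _kernel.ready_q.cache
 * instead of calling into the C scheduler. A sketch of that fast path
 * (hypothetical ARM assembly; registers and the branch label are
 * assumptions):
 *
 *     ldr r0, =_kernel
 *     ldr r1, [r0, #_kernel_offset_to_ready_q_cache]  @ cached next thread
 *     ldr r2, [r0, #_kernel_offset_to_current]        @ current thread
 *     cmp r1, r2
 *     beq _no_reschedule                              @ same thread: no switch
 */
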
/* end - kernel */

/* threads */

/* main */

#define _thread_offset_to_callee_saved \
	(___thread_t_callee_saved_OFFSET)

/* base */

#define _thread_offset_to_thread_state \
	(___thread_t_base_OFFSET + ___thread_base_t_thread_state_OFFSET)

#define _thread_offset_to_execution_flags \
	(___thread_t_base_OFFSET + ___thread_base_t_execution_flags_OFFSET)

#define _thread_offset_to_prio \
	(___thread_t_base_OFFSET + ___thread_base_t_prio_OFFSET)

#define _thread_offset_to_sched_locked \
	(___thread_t_base_OFFSET + ___thread_base_t_sched_locked_OFFSET)

#define _thread_offset_to_preempt \
	(___thread_t_base_OFFSET + ___thread_base_t_preempt_OFFSET)

#define _thread_offset_to_esf \
	(___thread_t_arch_OFFSET + ___thread_arch_t_esf_OFFSET)

/* end - threads */

#endif /* _offsets_short__h_ */