/*
 * Copyright (c) 2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Handling of transitions to-and-from regular IRQs (RIRQ)
 *
 * This module implements the code for handling entry to and exit from regular
 * IRQs.
 *
 * See isr_wrapper.S for details.
 */

#include <kernel_structs.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <arch/cpu.h>
#include <swap_macros.h>

GTEXT(_rirq_enter)
GTEXT(_rirq_exit)
GTEXT(_rirq_common_interrupt_swap)

#if 0 /* TODO: when FIRQ is not present, all would be regular */
#define NUM_REGULAR_IRQ_PRIO_LEVELS CONFIG_NUM_IRQ_PRIO_LEVELS
#else
#define NUM_REGULAR_IRQ_PRIO_LEVELS (CONFIG_NUM_IRQ_PRIO_LEVELS-1)
#endif

/* note: the above define assumes that prio 0 IRQ is for FIRQ, and
 * that all others are regular interrupts.
 * TODO: Revisit this if FIRQ becomes configurable.
 */

#if NUM_REGULAR_IRQ_PRIO_LEVELS > 1
#error "nested regular interrupts are not supported."
/*
 * Nesting of regular interrupts is not yet supported.
 * Set CONFIG_NUM_IRQ_PRIO_LEVELS to 2 even if the SoC supports more.
 */
#endif

/**
 *
 * @brief Work to be done before handing control to an IRQ ISR
 *
 * The processor automatically pushes all registers that need to be saved.
 * However, since the processor always runs at kernel privilege there is no
 * automatic switch to the IRQ stack: this must be done in software.
 *
 * Assumption by _isr_demux: r3 is untouched by _rirq_enter.
 *
 * @return N/A
 */

SECTION_FUNC(TEXT, _rirq_enter)

	mov r1, _kernel
#ifdef CONFIG_ARC_STACK_CHECKING
	/* disable stack checking */
	lr r2, [_ARC_V2_STATUS32]
	bclr r2, r2, _ARC_V2_STATUS32_SC_BIT
	kflag r2
#endif
	ld_s r2, [r1, _kernel_offset_to_current]
#if NUM_REGULAR_IRQ_PRIO_LEVELS == 1
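	/* save the outgoing thread's stack pointer and switch to the
	 * dedicated interrupt stack
	 */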
	st sp, [r2, _thread_offset_to_sp]
	ld sp, [r1, _kernel_offset_to_irq_stack]
#else
#error regular irq nesting is not implemented
#endif
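	/* demux the interrupt and invoke the ISR; the interrupt exit path
	 * runs through _rirq_exit (see isr_wrapper.S)
	 */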
j _isr_demux

/**
 *
 * @brief Work to be done when exiting an IRQ
 *
 * @return N/A
 */

SECTION_FUNC(TEXT, _rirq_exit)

#ifdef CONFIG_PREEMPT_ENABLED

	mov r1, _kernel
	ld_s r2, [r1, _kernel_offset_to_current]

	/*
	 * Lock interrupts to ensure kernel queues do not change from this
	 * point on until return from interrupt.
	 */
	clri

#if NUM_REGULAR_IRQ_PRIO_LEVELS > 1
	/* check if we're a nested interrupt: if so, let the interrupted interrupt
	 * handle the reschedule */

	lr r3, [_ARC_V2_AUX_IRQ_ACT]
	ffs r0, r3

	asl r0, 1, r0

	/* the OS on ARCv2 always runs in kernel mode, so assume bit31 [U] in
	 * AUX_IRQ_ACT is always 0: if the contents of AUX_IRQ_ACT is greater
	 * than FFS(AUX_IRQ_ACT), it means that another bit is set so an
	 * interrupt was interrupted.
	 */

	cmp r0, r3
	brgt _rirq_return_from_rirq
#endif

	/*
	 * Non-preemptible thread? Do not schedule (see explanation of
	 * preempt field in kernel_structs.h).
	 */
	ldh_s r0, [r2, _thread_offset_to_preempt]
	mov r3, _NON_PREEMPT_THRESHOLD
	cmp_s r0, r3
	bhs.d _rirq_no_reschedule

	/*
	 * Both (a) reschedule and (b) non-reschedule cases need to load the
	 * current thread's stack, but don't have to use it until the decision
	 * is taken: load the delay slots with the 'load stack pointer'
	 * instruction.
	 *
	 * a) needs to load it to save outgoing context.
	 * b) needs to load it to restore the interrupted context.
	 */

	ld sp, [r2, _thread_offset_to_sp]

	/* check if the current thread needs to be rescheduled */
	ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
	cmp_s r0, r2
	beq _rirq_no_reschedule

	/* cached thread to run is in r0, fall through */

.balign 4
_rirq_reschedule:

	/* _save_callee_saved_regs expects outgoing thread in r2 */
	_save_callee_saved_regs
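
	/* record that this thread was switched out by a regular IRQ; the
	 * relinquish cause is examined when the thread is switched back in
	 */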
	st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause]

	/* incoming thread is in r0: it becomes the new 'current' */
	mov r2, r0
	st_s r2, [r1, _kernel_offset_to_current]

.balign 4
_rirq_common_interrupt_swap:
	/* r2 contains pointer to new thread */

#ifdef CONFIG_ARC_STACK_CHECKING
	/* Use stack top and base registers from restored context */
	ld r3, [r2, _thread_offset_to_stack_base]
	sr r3, [_ARC_V2_KSTACK_BASE]
	ld r3, [r2, _thread_offset_to_stack_top]
	sr r3, [_ARC_V2_KSTACK_TOP]
#endif
	/*
	 * _load_callee_saved_regs expects incoming thread in r2.
	 * _load_callee_saved_regs restores the stack pointer.
	 */
	_load_callee_saved_regs

	ld_s r3, [r2, _thread_offset_to_relinquish_cause]

	breq r3, _CAUSE_RIRQ, _rirq_return_from_rirq
	nop
	breq r3, _CAUSE_FIRQ, _rirq_return_from_firq
	nop

	/* fall through: the thread was previously swapped out cooperatively */

.balign 4
_rirq_return_from_coop:

	/* status32 and pc (blink) are already on the stack in the right order */

	/* update status32.ie (explanation in firq_exit:_firq_return_from_coop) */
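	/* the saved status32 is the second word on the stack, at [sp, 4] */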
	ld_s r0, [sp, 4]
	ld_s r3, [r2, _thread_offset_to_intlock_key]
	st 0, [r2, _thread_offset_to_intlock_key]
	cmp r3, 0
	or.ne r0, r0, _ARC_V2_STATUS32_IE
	st_s r0, [sp, 4]

	/* carve fake stack */

	/*
	 * a) status32/pc are already on the stack
	 * b) a real value will be pushed in r0
	 */
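	/* i.e. reserve a full IRQ stack frame minus the 12 bytes already
	 * accounted for: status32 and pc (8 bytes) plus the r0 slot pushed below
	 */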
	sub sp, sp, (___isf_t_SIZEOF - 12)

	/* push return value on stack */
	ld_s r0, [r2, _thread_offset_to_return_value]
	push_s r0

	/*
	 * r13 is part of both the callee and caller-saved register sets because
	 * the processor is only able to save registers in pairs in the regular
	 * IRQ prologue. r13 thus has to be set to its correct value in the IRQ
	 * stack frame.
	 */
	st_s r13, [sp, ___isf_t_r13_OFFSET]

	/* stack now has the IRQ stack frame layout, pointing to r0 */

	/* fall through to rtie instruction */

.balign 4
_rirq_return_from_firq:
_rirq_return_from_rirq:

	/* rtie will pop the rest from the stack */

	/* fall through to rtie instruction */

#endif /* CONFIG_PREEMPT_ENABLED */

.balign 4
_rirq_no_reschedule:
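
	/* rtie pops the IRQ stack frame that sp points to and resumes the
	 * selected thread
	 */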
rtie