/*
 * Copyright (c) 2016 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _ASMLANGUAGE
#include <arch/nios2/asm.h>
#include <kernel_structs.h>
#include <offsets_short.h>

/* exports */
GTEXT(_exception)

/* imports */
GTEXT(_Fault)
GTEXT(_Swap)

#ifdef CONFIG_IRQ_OFFLOAD
GTEXT(_irq_do_offload)
GTEXT(_offload_routine)
#endif

/* Allows use of the r1/at register, otherwise reserved for assembler use */
.set noat

/* Placed into the special 'exception' section so that the linker can put
 * this code at ALT_CPU_EXCEPTION_ADDR, defined in system.h.
 *
 * This is the common entry point for processor exceptions and for interrupts
 * from the Internal Interrupt Controller (IIC).
 *
 * If the External Interrupt Controller (EIC) is in use, we will never get
 * here on behalf of an interrupt; instead, the EIC driver will have set up
 * a vector table and the processor will jump directly into the appropriate
 * table entry.
 */
SECTION_FUNC(exception.entry, _exception)

	/* Reserve thread stack space for saving context */
	subi sp, sp, __NANO_ESF_SIZEOF
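
	/* The __NANO_ESF_*_OFFSET macros and __NANO_ESF_SIZEOF used below
	 * are generated offsets into the exception stack frame (see
	 * offsets_short.h, included above), which keeps this assembly in
	 * sync with the C definition of the frame layout.
	 */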

	/* Preserve all caller-saved registers onto the thread's stack */
	stw ra, __NANO_ESF_ra_OFFSET(sp)
	stw r1, __NANO_ESF_r1_OFFSET(sp)
	stw r2, __NANO_ESF_r2_OFFSET(sp)
	stw r3, __NANO_ESF_r3_OFFSET(sp)
	stw r4, __NANO_ESF_r4_OFFSET(sp)
	stw r5, __NANO_ESF_r5_OFFSET(sp)
	stw r6, __NANO_ESF_r6_OFFSET(sp)
	stw r7, __NANO_ESF_r7_OFFSET(sp)
	stw r8, __NANO_ESF_r8_OFFSET(sp)
	stw r9, __NANO_ESF_r9_OFFSET(sp)
	stw r10, __NANO_ESF_r10_OFFSET(sp)
	stw r11, __NANO_ESF_r11_OFFSET(sp)
	stw r12, __NANO_ESF_r12_OFFSET(sp)
	stw r13, __NANO_ESF_r13_OFFSET(sp)
	stw r14, __NANO_ESF_r14_OFFSET(sp)
	stw r15, __NANO_ESF_r15_OFFSET(sp)
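
	/* Only the caller-saved registers go in the ESF: any C code we call
	 * from here must itself preserve the callee-saved registers, and
	 * _Swap() takes care of those when an actual context switch occurs.
	 */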

	/* Store the value of the estatus control register */
	rdctl et, estatus
	stw et, __NANO_ESF_estatus_OFFSET(sp)

	/* ea - 4 is the address of the instruction that was executing when
	 * the exception happened; put this in the stack frame as well.
	 */
	addi r15, ea, -4
	stw r15, __NANO_ESF_instr_OFFSET(sp)
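
	/* On Nios II, ea holds the address one instruction past the point
	 * where the exception was taken, which is why ea - 4 identifies the
	 * interrupted instruction.
	 */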

	/* Figure out whether we are here because of an interrupt or an
	 * exception. If an interrupt, switch stacks and enter IRQ handling
	 * code. If an exception, remain on the current stack and enter
	 * exception handling code. From the CPU manual, ipending must be
	 * nonzero and estatus.PIE must be set for this to be considered
	 * an interrupt.
	 *
	 * Stick ipending in r4 since it will be an argument to _enter_irq
	 */
	rdctl r4, ipending
	beq r4, zero, not_interrupt

	/* We stashed estatus in et earlier */
	andi r15, et, 1
	beq r15, zero, not_interrupt
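
	/* (Bit 0 of estatus is the saved PIE flag: if interrupts were
	 * disabled when the exception was taken, this cannot be an IIC
	 * interrupt.)
	 */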

is_interrupt:
	/* If we get here, this is an interrupt */

	/* Grab a reference to _kernel in r10 so we can determine the
	 * current IRQ stack pointer
	 */
	movhi r10, %hi(_kernel)
	ori r10, r10, %lo(_kernel)
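
	/* movhi/%hi plus ori/%lo build the full 32-bit address of _kernel,
	 * since Nios II immediates are only 16 bits wide.
	 */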

	/* Stash a copy of the thread's sp in r12 so that we can put it on
	 * the IRQ stack
	 */
	mov r12, sp

	/* Switch to the interrupt stack */
	ldw sp, _kernel_offset_to_irq_stack(r10)

	/* Store the thread stack pointer onto the IRQ stack */
	addi sp, sp, -4
	stw r12, 0(sp)
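
	/* The processor cleared status.PIE when it took the exception, so
	 * interrupts remain locked while we run on the IRQ stack; nesting
	 * is not a concern here.
	 */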

on_irq_stack:

	/* Enter C interrupt handling code. Value of ipending will be the
	 * function parameter since we put it in r4
	 */
	call _enter_irq
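
	/* _enter_irq() is expected to walk the pending bits passed in r4
	 * and dispatch the matching entries from the software ISR table
	 * (see the Nios II irq_manage code).
	 */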

	/* The interrupt handler has finished and the interrupt should be
	 * serviced by now; the appropriate bits in ipending should be
	 * cleared */

	/* Get a reference to _kernel again in r10 */
	movhi r10, %hi(_kernel)
	ori r10, r10, %lo(_kernel)

#ifdef CONFIG_PREEMPT_ENABLED
	ldw r11, _kernel_offset_to_current(r10)

	/* Determine whether the exception or the ISR requires a context
	 * switch
	 */

	/* Do not reschedule coop threads (threads that have negative prio) */
	ldw r12, _thread_offset_to_prio(r11)
	blt r12, zero, no_reschedule

	/* Do not reschedule if the scheduler is locked */
	ldw r12, _thread_offset_to_sched_locked(r11)
	bne r12, zero, no_reschedule

	/* Fetch the next thread to run from the ready queue cache; if it
	 * is the same as the current thread, no scheduling decision is
	 * necessary
	 */
	ldw r2, _kernel_offset_to_ready_q_cache(r10)
	beq r2, r11, no_reschedule
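
	/* Taken together, the three checks above are roughly this C logic,
	 * where prio and sched_locked stand for the thread fields loaded
	 * above (a readability sketch, not literal generated code):
	 *
	 *	if (prio < 0 || sched_locked ||
	 *	    _kernel.ready_q.cache == _kernel.current) {
	 *		goto no_reschedule;
	 *	}
	 */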

	/*
	 * A context reschedule is required: keep the volatile registers of
	 * the interrupted thread on the context's stack. Utilize
	 * the existing _Swap() primitive to save the remaining
	 * thread's registers (including floating point) and perform
	 * a switch to the new thread.
	 */

	/* We put the thread stack pointer on top of the IRQ stack before
	 * we switched stacks. Restore it to go back to the thread stack
	 */
	ldw sp, 0(sp)

	/* The argument to _Swap() is estatus, since that is the state of
	 * the status register before the exception happened. When coming
	 * out of the context switch we need this info to restore the
	 * IRQ lock state. We put this value in et earlier.
	 */
	mov r4, et

	call _Swap
	jmpi _exception_exit
#else
	jmpi no_reschedule
#endif /* CONFIG_PREEMPT_ENABLED */

not_interrupt:

	/* Since this wasn't an interrupt we're not going to restart the
	 * faulting instruction.
	 *
	 * We earlier put ea - 4 in the stack frame; replace it with just ea
	 */
	stw ea, __NANO_ESF_instr_OFFSET(sp)
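
	/* _exception_exit reloads ea from this slot before eret, so
	 * interrupts re-run the interrupted instruction (ea - 4) while
	 * faults and irq_offload() traps resume at the next one (ea).
	 */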

#ifdef CONFIG_IRQ_OFFLOAD
	/* Check the contents of _offload_routine. If non-NULL, jump into
	 * the interrupt code anyway.
	 */
	movhi r10, %hi(_offload_routine)
	ori r10, r10, %lo(_offload_routine)
	ldw r11, (r10)
	bne r11, zero, is_interrupt
#endif
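
	/* A sketch of how that path is reached, assuming the usual Nios II
	 * irq_offload() implementation: the routine pointer is stored in
	 * _offload_routine and a trap instruction is issued, which lands
	 * here and is then handled as if it were an interrupt.
	 */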

_exception_enter_fault:
	/* If we get here, the exception was neither an interrupt nor an
	 * invocation of irq_offload(). Let _Fault() handle it in the
	 * C domain
	 */
	mov r4, sp
	call _Fault
	jmpi _exception_exit

no_reschedule:

	/* We put the thread stack pointer on top of the IRQ stack before
	 * we switched stacks. Restore it to go back to the thread stack
	 */
	ldw sp, 0(sp)

	/* Fall through to _exception_exit */

_exception_exit:
	/* We are on the thread stack. Restore all saved registers
	 * and return to the interrupted context */

	/* Return address from the exception */
	ldw ea, __NANO_ESF_instr_OFFSET(sp)

	/* Restore estatus
	 * XXX is this right??? */
	ldw r5, __NANO_ESF_estatus_OFFSET(sp)
	wrctl estatus, r5
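
	/* eret copies estatus back into status, so writing the saved value
	 * here is what restores the pre-exception interrupt (PIE) state.
	 */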

	/* Restore caller-saved registers */
	ldw ra, __NANO_ESF_ra_OFFSET(sp)
	ldw r1, __NANO_ESF_r1_OFFSET(sp)
	ldw r2, __NANO_ESF_r2_OFFSET(sp)
	ldw r3, __NANO_ESF_r3_OFFSET(sp)
	ldw r4, __NANO_ESF_r4_OFFSET(sp)
	ldw r5, __NANO_ESF_r5_OFFSET(sp)
	ldw r6, __NANO_ESF_r6_OFFSET(sp)
	ldw r7, __NANO_ESF_r7_OFFSET(sp)
	ldw r8, __NANO_ESF_r8_OFFSET(sp)
	ldw r9, __NANO_ESF_r9_OFFSET(sp)
	ldw r10, __NANO_ESF_r10_OFFSET(sp)
	ldw r11, __NANO_ESF_r11_OFFSET(sp)
	ldw r12, __NANO_ESF_r12_OFFSET(sp)
	ldw r13, __NANO_ESF_r13_OFFSET(sp)
	ldw r14, __NANO_ESF_r14_OFFSET(sp)
	ldw r15, __NANO_ESF_r15_OFFSET(sp)

	/* Put the stack pointer back where it was when we entered
	 * exception state
	 */
	addi sp, sp, __NANO_ESF_SIZEOF

	/* All done; eret copies estatus into status and transfers control
	 * to ea */
	eret