csection: We can save execution time by removing redundant initialization-state checks
test: We can use QEMU for testing.

Compiling:
  make distclean -j20; ./tools/configure.sh -l qemu-armv8a:nsh_smp; make -j20
Running:
  qemu-system-aarch64 -cpu cortex-a53 -smp 4 -nographic \
    -machine virt,virtualization=on,gic-version=3 -net none \
    -chardev stdio,id=con,mux=on -serial chardev:con \
    -mon chardev=con,mode=readline -kernel ./nuttx

or

Compiling:
  make distclean -j20; ./tools/configure.sh -l sabre-6quad:smp; make -j20
Running:
  qemu-system-arm -semihosting -M sabrelite -m 1024 -smp 4 \
    -kernel nuttx/nuttx -nographic

Signed-off-by: hujun5 <hujun5@xiaomi.com>
parent ace5dde1a9
commit 9a36b8b823
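For context, the hot-path pattern removed by the diff below can be sketched in a few lines of standalone C. This is not NuttX code: the enum, g_initstate, and take_lock() are hypothetical stand-ins for nxsched_get_initstate(), OSINIT_TASKLISTS, and the real spinlock logic.

#include <stdio.h>

/* Hypothetical stand-ins for the NuttX names appearing in the diff. */

typedef enum
{
  OSINIT_BOOT = 0,
  OSINIT_TASKLISTS,
  OSINIT_OSREADY
} osinit_t;

static osinit_t g_initstate = OSINIT_BOOT;

static void take_lock(void)
{
  /* Spinlock acquisition elided in this sketch */
}

/* Before: every call re-evaluates the initialization state, although
 * that state crosses OSINIT_TASKLISTS exactly once, early in boot.
 */

static void enter_checked(void)
{
  if (g_initstate >= OSINIT_TASKLISTS)
    {
      take_lock();
    }
}

/* After: the per-call branch is gone; the caller relies on the critical
 * section not being entered before the task lists exist.
 */

static void enter_unchecked(void)
{
  take_lock();
}

int main(void)
{
  g_initstate = OSINIT_OSREADY;
  enter_checked();
  enter_unchecked();
  puts("both variants reach the lock once the OS is initialized");
  return 0;
}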
@@ -188,12 +188,6 @@ irqstate_t enter_critical_section(void)
 try_again:
   ret = up_irq_save();
 
-  /* Verify that the system has sufficiently initialized so that the task
-   * lists are valid.
-   */
-
-  if (nxsched_get_initstate() >= OSINIT_TASKLISTS)
-    {
       /* If called from an interrupt handler, then just take the spinlock.
        * If we are already in a critical section, this will lock the CPU
        * in the interrupt handler. Sounds worse than it is.
@@ -395,7 +389,6 @@ try_again_in_irq:
 #endif
             }
         }
-    }
 
   /* Return interrupt status */
 
@@ -412,11 +405,9 @@ irqstate_t enter_critical_section(void)
 
   ret = up_irq_save();
 
-  /* Check if we were called from an interrupt handler and that the task
-   * lists have been initialized.
-   */
+  /* Check if we were called from an interrupt handler */
 
-  if (!up_interrupt_context() && nxsched_get_initstate() >= OSINIT_TASKLISTS)
+  if (!up_interrupt_context())
     {
       FAR struct tcb_s *rtcb = this_task();
       DEBUGASSERT(rtcb != NULL);
@@ -459,12 +450,6 @@ void leave_critical_section(irqstate_t flags)
 {
   int cpu;
 
-  /* Verify that the system has sufficiently initialized so that the task
-   * lists are valid.
-   */
-
-  if (nxsched_get_initstate() >= OSINIT_TASKLISTS)
-    {
       /* If called from an interrupt handler, then just release the
        * spinlock. The interrupt handling logic should already hold the
        * spinlock if enter_critical_section() has been called. Unlocking
@@ -562,7 +547,6 @@ void leave_critical_section(irqstate_t flags)
               /* Have all CPUs released the lock? */
             }
         }
-    }
 
   /* Restore the previous interrupt state which may still be interrupts
    * disabled (but we don't have a mechanism to verify that now)
@@ -579,7 +563,7 @@ void leave_critical_section(irqstate_t flags)
    * lists have been initialized.
    */
 
-  if (!up_interrupt_context() && nxsched_get_initstate() >= OSINIT_TASKLISTS)
+  if (!up_interrupt_context())
     {
       FAR struct tcb_s *rtcb = this_task();
       DEBUGASSERT(rtcb != NULL);
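Reading the + lines of the non-SMP hunks together, the post-commit hot path of enter_critical_section() reduces to the shape sketched below (the TCB bookkeeping inside the if block and the SMP variant are elided):

irqstate_t enter_critical_section(void)
{
  irqstate_t ret;

  /* Disable interrupts */

  ret = up_irq_save();

  /* Check if we were called from an interrupt handler */

  if (!up_interrupt_context())
    {
      FAR struct tcb_s *rtcb = this_task();
      DEBUGASSERT(rtcb != NULL);

      /* Per-task IRQ count bookkeeping elided in this sketch */
    }

  return ret;
}

The saving is one comparison and branch per enter/leave call; the implicit assumption is that these functions are no longer called before the task lists are initialized.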