diff --git a/sched/sched/sched_lock.c b/sched/sched/sched_lock.c
index a7a55ac264..e7cd1016b7 100644
--- a/sched/sched/sched_lock.c
+++ b/sched/sched/sched_lock.c
@@ -151,7 +151,7 @@ int sched_lock(void)
 
   if (rtcb && !up_interrupt_context())
     {
-      /* Catch attempts to increment the lockcount beyound the range of the
+      /* Catch attempts to increment the lockcount beyond the range of the
        * integer type.
        */
 
diff --git a/sched/sched/sched_unlock.c b/sched/sched/sched_unlock.c
index df05975f50..e29cc1d906 100644
--- a/sched/sched/sched_unlock.c
+++ b/sched/sched/sched_unlock.c
@@ -101,7 +101,7 @@ int sched_unlock(void)
       if (rtcb->lockcount <= 0)
         {
 #ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
-          /* Note that we no longer have pre-emption */
+          /* Note that we no longer have pre-emption disabled. */
 
           sched_note_premption(rtcb, false);
 #endif
diff --git a/sched/semaphore/spinlock.c b/sched/semaphore/spinlock.c
index 2d667c0ed3..6d58dfcc3c 100644
--- a/sched/semaphore/spinlock.c
+++ b/sched/semaphore/spinlock.c
@@ -468,7 +468,7 @@ void spin_setbit(FAR volatile cpu_set_t *set, unsigned int cpu,
   irqstate_t flags;
 
   /* Disable local interrupts to prevent being re-entered from an interrupt
-   * on the same CPU. This does not effect the behavior on other CPUs.
+   * on the same CPU. This may not affect interrupt behavior on other CPUs.
    */
 
   flags = up_irq_save();
@@ -527,7 +527,7 @@ void spin_clrbit(FAR volatile cpu_set_t *set, unsigned int cpu,
   irqstate_t flags;
 
   /* Disable local interrupts to prevent being re-entered from an interrupt
-   * on the same CPU. This does not effect the behavior on other CPUs.
+   * on the same CPU. This may not affect interrupt behavior on other CPUs.
    */
 
   flags = up_irq_save();
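
Reviewer note on the spinlock.c hunks: masking local interrupts with up_irq_save() only prevents re-entry from an interrupt handler on the same CPU, which is exactly what the reworded comments say; serialization against other CPUs has to come from the spinlock taken afterwards. The sketch below is a minimal illustration of that pattern, assuming NuttX's up_irq_save()/up_irq_restore() and spin_lock()/spin_unlock() primitives; illustrate_setbit() is a hypothetical stand-in for this note, not the upstream spin_setbit() implementation.

    #include <nuttx/irq.h>       /* irqstate_t, up_irq_save(), up_irq_restore() */
    #include <nuttx/spinlock.h>  /* spinlock_t, spin_lock(), spin_unlock() */
    #include <nuttx/sched.h>     /* cpu_set_t (assumed location) */

    /* Hypothetical sketch of the pattern the patched comments describe.
     * Masking interrupts stops re-entry on THIS CPU only; the spinlock
     * is what serializes the CPU-set update against other CPUs.
     */

    static void illustrate_setbit(FAR volatile cpu_set_t *set,
                                  unsigned int cpu,
                                  FAR volatile spinlock_t *setlock)
    {
      irqstate_t flags;

      flags = up_irq_save();     /* No re-entry from local interrupts */
      spin_lock(setlock);        /* Serialize against other CPUs */

      *set |= ((cpu_set_t)1 << cpu);

      spin_unlock(setlock);
      up_irq_restore(flags);     /* Restore the saved interrupt state */
    }

A clear-bit variant would be identical except for clearing the bit with `*set &= ~((cpu_set_t)1 << cpu);`, which is why the same comment fix appears in both the spin_setbit() and spin_clrbit() hunks.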