kernel: inline z_unpend_first_thread()

Inlining z_unpend_first_thread() has been observed to give +8% and
+16% performance boosts to the thread_metric benchmark's message
processing and synchronization tests, respectively.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
Author:    Peter Mitsis <peter.mitsis@intel.com>  2024-10-17 10:05:01 -07:00
Committed: Maureen Helm
Parent:    0bf44f2352
Commit:    f6a76c32b7

2 changed files with 33 additions and 31 deletions
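For context on why inlining this helps: z_unpend_first_thread() sits on the
hot path of most IPC "give"/"send" operations. The sketch below is a
hypothetical call site, loosely modeled on Zephyr's semaphore give path; the
names my_ipc, my_ipc_give, and the count field are made up for illustration
and are not part of this commit:

/* Hypothetical IPC "give" path, for illustration only. */
struct my_ipc {
	struct k_spinlock lock;
	_wait_q_t wait_q;
	unsigned int count;
};

void my_ipc_give(struct my_ipc *obj)
{
	k_spinlock_key_t key = k_spin_lock(&obj->lock);
	/* Wake the highest-priority waiter, if any. After this commit the
	 * call below expands inline, removing call/return overhead from
	 * the IPC fast path.
	 */
	struct k_thread *thread = z_unpend_first_thread(&obj->wait_q);

	if (thread != NULL) {
		arch_thread_return_value_set(thread, 0);
		z_ready_thread(thread);
		z_reschedule(&obj->lock, key);
	} else {
		obj->count++;
		k_spin_unlock(&obj->lock, key);
	}
}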

--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h

@@ -13,6 +13,7 @@
 #include <kthread.h>
 #include <zephyr/tracing/tracing.h>
 #include <stdbool.h>
+#include <priority_q.h>
 
 BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
	     >= K_HIGHEST_APPLICATION_THREAD_PRIO);
@@ -37,6 +38,8 @@ BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
 #define Z_ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
 #endif /* CONFIG_MULTITHREADING */
 
+extern struct k_spinlock _sched_spinlock;
+
 extern struct k_thread _thread_dummy;
 
 void z_sched_init(void);
@@ -49,7 +52,6 @@ void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout);
 void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
 void z_reschedule_irqlock(uint32_t key);
-struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q);
 void z_unpend_thread(struct k_thread *thread);
 int z_unpend_all(_wait_q_t *wait_q);
 bool z_thread_prio_set(struct k_thread *thread, int prio);
@@ -142,6 +144,36 @@ static inline void z_sched_lock(void)
	compiler_barrier();
 }
 
+static ALWAYS_INLINE _wait_q_t *pended_on_thread(struct k_thread *thread)
+{
+	__ASSERT_NO_MSG(thread->base.pended_on);
+
+	return thread->base.pended_on;
+}
+
+static inline void unpend_thread_no_timeout(struct k_thread *thread)
+{
+	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
+	z_mark_thread_as_not_pending(thread);
+	thread->base.pended_on = NULL;
+}
+
+static ALWAYS_INLINE struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
+{
+	struct k_thread *thread = NULL;
+
+	K_SPINLOCK(&_sched_spinlock) {
+		thread = _priq_wait_best(&wait_q->waitq);
+
+		if (unlikely(thread != NULL)) {
+			unpend_thread_no_timeout(thread);
+			(void)z_abort_thread_timeout(thread);
+		}
+	}
+
+	return thread;
+}
+
 /*
  * APIs for working with the Zephyr kernel scheduler. Intended for use in
  * management of IPC objects, either in the core kernel or other IPC
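A note on the locking idiom in the new inline body: K_SPINLOCK() is Zephyr's
scoped spinlock macro; the braced block runs exactly once with the lock held,
and the lock is released on any exit from the block. Below is a minimal
standalone sketch of the same pattern; my_lock, my_counter, and
my_fetch_and_inc are hypothetical names used only for illustration:

#include <zephyr/spinlock.h>

static struct k_spinlock my_lock;   /* hypothetical lock */
static int my_counter;              /* hypothetical shared state */

static int my_fetch_and_inc(void)
{
	int old = 0;

	K_SPINLOCK(&my_lock) {
		/* Body executes once with my_lock held; the lock is
		 * dropped automatically when the block is left.
		 */
		old = my_counter;
		my_counter++;
	}

	return old;
}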

--- a/kernel/sched.c
+++ b/kernel/sched.c

@@ -554,13 +554,6 @@ static inline void z_vrfy_k_thread_resume(k_tid_t thread)
 #include <zephyr/syscalls/k_thread_resume_mrsh.c>
 #endif /* CONFIG_USERSPACE */
 
-static _wait_q_t *pended_on_thread(struct k_thread *thread)
-{
-	__ASSERT_NO_MSG(thread->base.pended_on);
-
-	return thread->base.pended_on;
-}
-
 static void unready_thread(struct k_thread *thread)
 {
	if (z_is_thread_queued(thread)) {
@@ -609,13 +602,6 @@ void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
	}
 }
 
-static inline void unpend_thread_no_timeout(struct k_thread *thread)
-{
-	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
-	z_mark_thread_as_not_pending(thread);
-	thread->base.pended_on = NULL;
-}
-
 void z_unpend_thread_no_timeout(struct k_thread *thread)
 {
	K_SPINLOCK(&_sched_spinlock) {
@@ -704,22 +690,6 @@ struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
	return thread;
 }
 
-struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
-{
-	struct k_thread *thread = NULL;
-
-	K_SPINLOCK(&_sched_spinlock) {
-		thread = _priq_wait_best(&wait_q->waitq);
-
-		if (unlikely(thread != NULL)) {
-			unpend_thread_no_timeout(thread);
-			(void)z_abort_thread_timeout(thread);
-		}
-	}
-
-	return thread;
-}
-
 void z_unpend_thread(struct k_thread *thread)
 {
	z_unpend_thread_no_timeout(thread);