/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <ksched.h>
#include <wait_q.h>
#include <posix/pthread.h>

int64_t timespec_to_timeoutms(const struct timespec *abstime);

static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut,
		     k_timeout_t timeout)
{
	__ASSERT(mut->lock_count == 1U, "");

	int ret, key = irq_lock();

	mut->lock_count = 0U;
	mut->owner = NULL;
	_ready_one_thread(&mut->wait_q);
	ret = z_pend_curr_irqlock(key, &cv->wait_q, timeout);

	/* FIXME: this extra lock (and the potential context switch it
	 * can cause) could be optimized out. At the point of the
	 * signal/broadcast, it's possible to detect whether or not we
	 * will be swapping back to this particular thread and lock it
	 * (i.e. leave the lock variable unchanged) on our behalf.
	 * But that requires putting scheduler intelligence into this
	 * higher level abstraction and is probably not worth it.
	 */
	pthread_mutex_lock(mut);

	return ret == -EAGAIN ? ETIMEDOUT : ret;
}
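
/* Illustrative usage sketch, not part of the implementation: the
 * classic consumer/producer pattern built on the calls in this file.
 * All names below (ex_lock, ex_cond, ex_ready, ...) are hypothetical.
 * The waiter must hold the mutex and must re-check its predicate in a
 * loop, because the mutex is dropped while the thread is blocked in
 * cond_wait() and other threads may run (and consume the event) in
 * between.
 */
static pthread_mutex_t ex_lock;
static pthread_cond_t ex_cond;
static int ex_ready;

static void ex_init(void)
{
	/* Standard POSIX init calls with default attributes */
	(void)pthread_mutex_init(&ex_lock, NULL);
	(void)pthread_cond_init(&ex_cond, NULL);
}

static void ex_consumer(void)
{
	pthread_mutex_lock(&ex_lock);
	while (ex_ready == 0) {
		/* Atomically releases ex_lock while blocked and
		 * re-acquires it before returning (see cond_wait())
		 */
		pthread_cond_wait(&ex_cond, &ex_lock);
	}
	ex_ready = 0;
	pthread_mutex_unlock(&ex_lock);
}

static void ex_producer(void)
{
	pthread_mutex_lock(&ex_lock);
	ex_ready = 1;
	pthread_mutex_unlock(&ex_lock);

	/* Wake at most one waiter; see the policy comment below */
	pthread_cond_signal(&ex_cond);
}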

/* This implements a "fair" scheduling policy: at the end of a POSIX
 * thread call that might result in a change of the current maximum
 * priority thread, we always check and context switch if needed.
 * Note that there is significant dispute in the community over the
 * "right" way to do this and different systems do it differently by
 * default. Zephyr is an RTOS, so we choose latency over
 * throughput. See here for a good discussion of the broad issue:
 *
 * https://blog.mozilla.org/nfroyd/2017/03/29/on-mutex-performance-part-1/
 */

int pthread_cond_signal(pthread_cond_t *cv)
{
	int key = irq_lock();

	_ready_one_thread(&cv->wait_q);
	z_reschedule_irqlock(key);

	return 0;
}

int pthread_cond_broadcast(pthread_cond_t *cv)
{
	int key = irq_lock();

	while (z_waitq_head(&cv->wait_q)) {
		_ready_one_thread(&cv->wait_q);
	}

	z_reschedule_irqlock(key);

	return 0;
}
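
/* Illustrative sketch with hypothetical names: broadcast is the right
 * call when every waiter must re-evaluate its predicate, e.g. a
 * one-shot "shutdown" flag. Each woken thread still re-acquires the
 * mutex one at a time inside cond_wait(), so the wakeups serialize on
 * the mutex rather than stampeding.
 */
static void ex_shutdown(pthread_mutex_t *lock, pthread_cond_t *cond,
			int *shutting_down)
{
	pthread_mutex_lock(lock);
	*shutting_down = 1;
	pthread_mutex_unlock(lock);

	/* Every blocked waiter is made ready before we reschedule */
	pthread_cond_broadcast(cond);
}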

int pthread_cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut)
{
	return cond_wait(cv, mut, K_FOREVER);
}

int pthread_cond_timedwait(pthread_cond_t *cv, pthread_mutex_t *mut,
			   const struct timespec *abstime)
{
	int32_t timeout = (int32_t)timespec_to_timeoutms(abstime);

	return cond_wait(cv, mut, K_MSEC(timeout));
}
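
/* Illustrative sketch with hypothetical names: waiting with a
 * deadline. The deadline is an absolute timespec, built here from the
 * current time plus a relative delay in milliseconds. Which clock the
 * deadline is compared against is decided by timespec_to_timeoutms();
 * CLOCK_MONOTONIC is assumed in this sketch, and clock_gettime() is
 * assumed to be available from this layer's POSIX clock API.
 */
static int ex_wait_ms(pthread_cond_t *cond, pthread_mutex_t *lock,
		      int *flag, int delay_ms)
{
	struct timespec abstime;
	int ret = 0;

	clock_gettime(CLOCK_MONOTONIC, &abstime);
	abstime.tv_sec += delay_ms / 1000;
	abstime.tv_nsec += (delay_ms % 1000) * 1000000L;
	if (abstime.tv_nsec >= 1000000000L) {
		abstime.tv_sec += 1;
		abstime.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(lock);
	while (*flag == 0 && ret != ETIMEDOUT) {
		/* Returns ETIMEDOUT once the deadline passes */
		ret = pthread_cond_timedwait(cond, lock, &abstime);
	}
	pthread_mutex_unlock(lock);

	return (*flag != 0) ? 0 : ret;
}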