Add logic to disable cancellation points within the OS. This is useful when an internal OS function that is NOT a cancellation point calls an OS function that IS a cancellation point. In that case, an irrecoverable state may result if the thread is canceled while it is still executing within the OS.
parent 3fb730040b
commit b4747286b1
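The call-site pattern the commit introduces is sketched below: save and clear the thread's cancellation state before entering an OS function that contains a cancellation point, then restore it afterwards so that a pending cancellation is deferred rather than lost. This is only an illustration; internal_os_function() and its mutex argument are hypothetical, and the sketch assumes the pthread_disable_cancel()/pthread_enable_cancel() declarations added to the pthread header in the first hunk below.

/* Hypothetical internal OS function that is NOT a cancellation point but
 * must call pthread_mutex_take(), whose underlying semaphore wait is a
 * cancellation point.  Cancellation is deferred across the call and the
 * previous cancel state is restored afterwards.
 */

static int internal_os_function(FAR struct pthread_mutex_s *mutex)
{
  uint16_t oldstate;
  int status;

  oldstate = pthread_disable_cancel();   /* Save and clear the cancel state */
  status   = pthread_mutex_take(mutex, false);
  pthread_enable_cancel(oldstate);       /* Restore the saved cancel state */

  return status;
}

When CONFIG_CANCELLATION_POINTS is not selected, the #else stubs in the header hunk reduce pthread_disable_cancel() to (0) and pthread_enable_cancel() to a no-op, so the same call sites compile with no conditional logic.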
@@ -121,6 +121,14 @@ void pthread_mutex_inconsistent(FAR struct pthread_tcb_s *tcb);
 # define pthread_mutex_give(m) pthread_givesemaphore(&(m)->sem)
 #endif
 
+#ifdef CONFIG_CANCELLATION_POINTS
+uint16_t pthread_disable_cancel(void);
+void pthread_enable_cancel(uint16_t oldstate);
+#else
+# define pthread_disable_cancel() (0)
+# define pthread_enable_cancel(s) UNUSED(s)
+#endif
+
 #ifdef CONFIG_PTHREAD_MUTEX_TYPES
 int pthread_mutexattr_verifytype(int type);
 #endif
@@ -167,9 +167,10 @@ int pthread_cond_timedwait(FAR pthread_cond_t *cond, FAR pthread_mutex_t *mutex,
                            FAR const struct timespec *abstime)
 {
   FAR struct tcb_s *rtcb = this_task();
+  irqstate_t flags;
+  uint16_t oldstate;
   int ticks;
   int mypid = (int)getpid();
-  irqstate_t flags;
   int ret = OK;
   int status;
 
@@ -316,7 +317,11 @@ int pthread_cond_timedwait(FAR pthread_cond_t *cond, FAR pthread_mutex_t *mutex,
       /* Reacquire the mutex (retaining the ret). */
 
       sinfo("Re-locking...\n");
 
+      oldstate = pthread_disable_cancel();
       status = pthread_mutex_take(mutex, false);
+      pthread_enable_cancel(oldstate);
+
       if (status == OK)
         {
           mutex->pid = mypid;
@@ -95,6 +95,8 @@ int pthread_cond_wait(FAR pthread_cond_t *cond, FAR pthread_mutex_t *mutex)
     }
   else
     {
+      uint16_t oldstate;
+
       /* Give up the mutex */
 
       sinfo("Give up mutex / take cond\n");
@@ -117,12 +119,17 @@ int pthread_cond_wait(FAR pthread_cond_t *cond, FAR pthread_mutex_t *mutex)
 
       /* Reacquire the mutex.
        *
-       * REVISIT: When cancellation points are enabled, we will almost
-       * certainly hold the mutex when the pthread is canceled.
+       * When cancellation points are enabled, we need to
+       * hold the mutex when the pthread is canceled and
+       * cleanup handlers, if any, are entered.
        */
 
       sinfo("Reacquire mutex...\n");
 
+      oldstate = pthread_disable_cancel();
       status = pthread_mutex_take(mutex, false);
+      pthread_enable_cancel(oldstate);
+
       if (ret == OK)
         {
           /* Report the first failure that occurs */
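The comment rewritten in the hunk above concerns cancellation cleanup semantics: with deferred cancellation, a thread canceled inside a condition-variable wait must hold the mutex when its cleanup handlers run, because the conventional handler for such a wait unlocks that mutex. The sketch below is a plain application-level illustration of that convention using only standard POSIX calls; it is not part of this commit, and the names (g_lock, g_cond, g_ready, waiter, cleanup_unlock) are invented for the example.

#include <pthread.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  g_cond = PTHREAD_COND_INITIALIZER;
static int g_ready;             /* Set by another thread while holding g_lock */

static void cleanup_unlock(void *arg)
{
  /* Runs if the waiter is canceled while waiting.  The mutex must be held
   * here, which is what deferring cancellation across the mutex
   * re-acquisition guarantees.
   */

  pthread_mutex_unlock((pthread_mutex_t *)arg);
}

static void *waiter(void *arg)
{
  pthread_mutex_lock(&g_lock);
  pthread_cleanup_push(cleanup_unlock, &g_lock);

  while (!g_ready)
    {
      /* pthread_cond_wait() is a cancellation point; POSIX requires the
       * mutex to be re-acquired before the cleanup handlers are entered.
       */

      pthread_cond_wait(&g_cond, &g_lock);
    }

  pthread_cleanup_pop(1);       /* Pop and run the handler: unlocks g_lock */
  return NULL;
}

If the cancellation were instead allowed to fire inside the mutex re-acquisition, the handler would run without the mutex held; deferring cancellation around pthread_mutex_take(), as the hunk above does, avoids that inconsistent state.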
@@ -95,8 +95,8 @@ static void pthread_mutex_add(FAR struct pthread_mutex_s *mutex)
  * mutex to the list of mutexes held by this thread.
  *
  * Parameters:
- *  mutex - The mux to be locked
- *  intr  - false: ignore EINTR errors when locking; true tread EINTR as
+ *  mutex - The mutex to be locked
+ *  intr  - false: ignore EINTR errors when locking; true treat EINTR as
  *          other errors by returning the errno value
  *
  * Return Value:
@@ -126,15 +126,11 @@ int pthread_mutex_take(FAR struct pthread_mutex_s *mutex, bool intr)
       else
         {
           /* Take semaphore underlying the mutex.  pthread_takesemaphore
-           * returns zero on success and a positive errno value on failue.
+           * returns zero on success and a positive errno value on failure.
            */
 
           ret = pthread_takesemaphore(&mutex->sem, intr);
-          if (ret != OK)
-            {
-              ret = get_errno();
-            }
-          else
+          if (ret == OK)
             {
               /* Check if the holder of the mutex has terminated without
                * releasing.  In that case, the state of the mutex is
@@ -169,8 +165,8 @@ int pthread_mutex_take(FAR struct pthread_mutex_s *mutex, bool intr)
  * mutex to the list of mutexes held by this thread.
  *
  * Parameters:
- *  mutex - The mux to be locked
- *  intr  - false: ignore EINTR errors when locking; true tread EINTR as
+ *  mutex - The mutex to be locked
+ *  intr  - false: ignore EINTR errors when locking; true treat EINTR as
  *          other errors by returning the errno value
  *
  * Return Value:
@@ -283,3 +279,70 @@ int pthread_mutex_give(FAR struct pthread_mutex_s *mutex)
   return ret;
 }
 
+/****************************************************************************
+ * Name: pthread_disable_cancel() and pthread_enable_cancel()
+ *
+ * Description:
+ *   Temporarily disable cancellation and return the old cancel state, which
+ *   can later be restored.  This is useful when a cancellation point
+ *   function is called from within the OS by a function that is not itself
+ *   a cancellation point:  In certain such cases, we need to defer the
+ *   cancellation to prevent bad things from happening.
+ *
+ * Parameters:
+ *   saved cancel flags for pthread_enable_cancel()
+ *
+ * Return Value:
+ *   old cancel flags for pthread_disable_cancel()
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_CANCELLATION_POINTS
+uint16_t pthread_disable_cancel(void)
+{
+  FAR struct pthread_tcb_s *tcb = (FAR struct pthread_tcb_s *)this_task();
+  irqstate_t flags;
+  uint16_t old;
+
+  /* We need to perform the following operations from within a critical
+   * section because they can compete with interrupt level activity.
+   */
+
+  flags = enter_critical_section();
+  old = tcb->cmn.flags & (TCB_FLAG_CANCEL_PENDING | TCB_FLAG_NONCANCELABLE);
+  tcb->cmn.flags &= ~(TCB_FLAG_CANCEL_PENDING | TCB_FLAG_NONCANCELABLE);
+  leave_critical_section(flags);
+  return old;
+}
+
+void pthread_enable_cancel(uint16_t cancelflags)
+{
+  FAR struct pthread_tcb_s *tcb = (FAR struct pthread_tcb_s *)this_task();
+  irqstate_t flags;
+
+  /* We need to perform the following operations from within a critical
+   * section because they can compete with interrupt level activity.
+   */
+
+  flags = enter_critical_section();
+  tcb->cmn.flags |= cancelflags;
+
+  /* What should we do if there is a pending cancellation?
+   *
+   * If the thread is executing with deferred cancellation, we need do
+   * nothing more; the cancellation cannot occur until the next
+   * cancellation point.
+   *
+   * However, if the thread is executing in asynchronous cancellation mode,
+   * then we need to terminate now by simply calling pthread_exit().
+   */
+
+  if ((tcb->cmn.flags & TCB_FLAG_CANCEL_DEFERRED) == 0 &&
+      (tcb->cmn.flags & TCB_FLAG_CANCEL_PENDING) != 0)
+    {
+      pthread_exit(NULL);
+    }
+
+  leave_critical_section(flags);
+}
+#endif /* CONFIG_CANCELLATION_POINTS */
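The new functions above do only flag bookkeeping inside the critical section. The sketch below is a host-side model of just that save/restore round trip, with made-up stand-in flag values instead of the real TCB_FLAG_* constants and without the critical section or the pthread_exit() path; it illustrates that nested disable/enable pairs compose and that a pending cancellation is restored rather than dropped.

#include <stdint.h>
#include <stdio.h>

#define FAKE_CANCEL_PENDING  (1 << 0)   /* Stand-in for TCB_FLAG_CANCEL_PENDING */
#define FAKE_NONCANCELABLE   (1 << 1)   /* Stand-in for TCB_FLAG_NONCANCELABLE */

static uint16_t g_flags;                /* Stand-in for tcb->cmn.flags */

static uint16_t fake_disable_cancel(void)
{
  uint16_t old = g_flags & (FAKE_CANCEL_PENDING | FAKE_NONCANCELABLE);
  g_flags &= ~(FAKE_CANCEL_PENDING | FAKE_NONCANCELABLE);
  return old;
}

static void fake_enable_cancel(uint16_t oldflags)
{
  g_flags |= oldflags;
}

int main(void)
{
  uint16_t outer;
  uint16_t inner;

  g_flags = FAKE_CANCEL_PENDING;        /* Pretend a cancellation is pending */

  outer = fake_disable_cancel();        /* Saves PENDING, clears both flags */
  inner = fake_disable_cancel();        /* Nested call saves 0: already clear */
  fake_enable_cancel(inner);            /* No-op: nothing to restore */
  fake_enable_cancel(outer);            /* PENDING is restored, not lost */

  printf("pending restored: %s\n",
         (g_flags & FAKE_CANCEL_PENDING) != 0 ? "yes" : "no");
  return 0;
}

In the real implementation, the restore step additionally checks TCB_FLAG_CANCEL_DEFERRED and calls pthread_exit() immediately when an asynchronous-mode cancellation was deferred across the disabled region.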