/****************************************************************************
 * sched/wqueue/kwork_thread.c
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <unistd.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <debug.h>

#include <nuttx/queue.h>
#include <nuttx/wqueue.h>
#include <nuttx/kthread.h>
#include <nuttx/semaphore.h>

#include "sched/sched.h"
#include "wqueue/wqueue.h"

#if defined(CONFIG_SCHED_WORKQUEUE)

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_WQUEUE
#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_WQUEUE 0
#endif
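
/* CALL_WORKER() invokes a queued worker callback.  When the critical-
 * monitor limit above is non-zero, it also measures how long the callback
 * runs and panics if the limit is exceeded; otherwise it simply calls the
 * worker.
 */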

#if CONFIG_SCHED_CRITMONITOR_MAXTIME_WQUEUE > 0
#  define CALL_WORKER(worker, arg) \
     do \
       { \
         clock_t start; \
         clock_t elapsed; \
         start = perf_gettime(); \
         worker(arg); \
         elapsed = perf_gettime() - start; \
         if (elapsed > CONFIG_SCHED_CRITMONITOR_MAXTIME_WQUEUE) \
           { \
             CRITMONITOR_PANIC("WORKER %p execute too long %ju\n", \
                               worker, (uintmax_t)elapsed); \
           } \
       } \
     while (0)
#else
#  define CALL_WORKER(worker, arg) worker(arg)
#endif

/****************************************************************************
 * Public Data
 ****************************************************************************/

#if defined(CONFIG_SCHED_HPWORK)
/* The state of the kernel mode, high priority work queue(s). */

struct hp_wqueue_s g_hpwork =
{
  {NULL, NULL},
  SEM_INITIALIZER(0),
};

#endif /* CONFIG_SCHED_HPWORK */

#if defined(CONFIG_SCHED_LPWORK)
/* The state of the kernel mode, low priority work queue(s). */

struct lp_wqueue_s g_lpwork =
{
  {NULL, NULL},
  SEM_INITIALIZER(0),
};

#endif /* CONFIG_SCHED_LPWORK */

/****************************************************************************
 * Private Functions
 ****************************************************************************/

/****************************************************************************
 * Name: work_thread
 *
 * Description:
 *   These are the worker threads that perform the actions placed on the
 *   high priority work queue.
 *
 *   These, along with the lower priority worker thread(s), are the kernel
 *   mode work queues (also built in the flat build).
 *
 *   All kernel mode worker threads are started by the OS during normal
 *   bring up.  This entry point is referenced by the OS internally and
 *   should not be accessed by application logic.
 *
 * Input Parameters:
 *   argc, argv
 *
 * Returned Value:
 *   Does not return
 *
 ****************************************************************************/

static int work_thread(int argc, FAR char *argv[])
{
  FAR struct kwork_wqueue_s *wqueue;
  FAR struct kworker_s *kworker;
  FAR struct work_s *work;
  worker_t worker;
  irqstate_t flags;
  FAR void *arg;
  int semcount;

  /* Get the handle from argv */

  wqueue = (FAR struct kwork_wqueue_s *)
           ((uintptr_t)strtoul(argv[1], NULL, 0));
  kworker = (FAR struct kworker_s *)
            ((uintptr_t)strtoul(argv[2], NULL, 0));

  flags = enter_critical_section();

  /* Loop forever */

  for (; ; )
    {
      /* And check each entry in the work queue.  Since we have disabled
       * interrupts we know:  (1) we will not be suspended unless we do
       * so ourselves, and (2) there will be no changes to the work queue.
       */

      /* Remove the ready-to-execute work from the list */

      while ((work = (FAR struct work_s *)dq_remfirst(&wqueue->q)) != NULL)
        {
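          /* A work item may have been cancelled after it was queued but
           * before this thread ran; such an entry has a NULL worker and is
           * simply skipped.
           */
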
          if (work->worker == NULL)
            {
              continue;
            }

          /* Extract the work description from the entry (in case the work
           * instance will be re-used after it has been de-queued).
           */

          worker = work->worker;

          /* Extract the work argument (before re-enabling interrupts) */

          arg = work->arg;

          /* Mark the work as no longer being queued */

          work->worker = NULL;

          /* Mark the thread busy */

          kworker->work = work;

          /* Do the work.  Re-enable interrupts while the work is being
           * performed... we don't have any idea how long this will take!
           */

          leave_critical_section(flags);
          CALL_WORKER(worker, arg);
          flags = enter_critical_section();

          /* Mark the thread un-busy */

          kworker->work = NULL;

          /* Check if anyone is waiting for this worker; if so, wake them
           * up.
           */

          nxsem_get_value(&kworker->wait, &semcount);
          while (semcount++ < 0)
            {
              nxsem_post(&kworker->wait);
            }
        }

      /* Then wait for more work to be queued.  The queue semaphore is
       * posted each time a new work item is added.
       */

      nxsem_wait_uninterruptible(&wqueue->sem);
    }

  leave_critical_section(flags);

  return OK; /* To keep some compilers happy */
}

/****************************************************************************
 * Name: work_thread_create
 *
 * Description:
 *   This function creates and activates a work thread task with kernel-
 *   mode privileges.
 *
 * Input Parameters:
 *   name       - Name of the new task
 *   priority   - Priority of the new task
 *   stack_size - Size (in bytes) of the stack needed
 *   nthread    - Number of work threads to be created
 *   wqueue     - Work queue instance
 *
 * Returned Value:
 *   A negated errno value is returned on failure.
 *
 ****************************************************************************/

static int work_thread_create(FAR const char *name, int priority,
                              int stack_size, int nthread,
                              FAR struct kwork_wqueue_s *wqueue)
{
  FAR char *argv[3];
  char arg0[32];
  char arg1[32];
  int wndx;
  int pid;

  /* Don't permit any of the threads to run until we have fully initialized
   * g_hpwork and g_lpwork.
   */

  sched_lock();

  for (wndx = 0; wndx < nthread; wndx++)
    {
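      /* Initialize this worker's 'wait' semaphore.  work_thread posts it
       * to wake up anyone waiting for the worker to finish its current
       * work item.
       */
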
      nxsem_init(&wqueue->worker[wndx].wait, 0, 0);
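
      /* Pass the work queue and worker structure pointers to the new
       * thread as string-encoded argv entries; work_thread converts them
       * back to pointers with strtoul().
       */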

      snprintf(arg0, sizeof(arg0), "%p", wqueue);
      snprintf(arg1, sizeof(arg1), "%p", &wqueue->worker[wndx]);
      argv[0] = arg0;
      argv[1] = arg1;
      argv[2] = NULL;

      pid = kthread_create(name, priority, stack_size,
                           work_thread, argv);

      DEBUGASSERT(pid > 0);
      if (pid < 0)
        {
          serr("ERROR: work_thread_create %d failed: %d\n", wndx, pid);
          sched_unlock();
          return pid;
        }

      wqueue->worker[wndx].pid = pid;
    }

  sched_unlock();
  return OK;
}

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: work_foreach
 *
 * Description:
 *   Enumerate over each work thread and provide the pid of each thread to
 *   a user callback function.
 *
 * Input Parameters:
 *   qid     - The work queue ID
 *   handler - The function to be called with the pid of each task
 *   arg     - The argument passed to the handler function
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

void work_foreach(int qid, work_foreach_t handler, FAR void *arg)
{
  FAR struct kwork_wqueue_s *wqueue;
  int nthread;
  int wndx;

#ifdef CONFIG_SCHED_HPWORK
  if (qid == HPWORK)
    {
      wqueue  = (FAR struct kwork_wqueue_s *)&g_hpwork;
      nthread = CONFIG_SCHED_HPNTHREADS;
    }
  else
#endif
#ifdef CONFIG_SCHED_LPWORK
  if (qid == LPWORK)
    {
      wqueue  = (FAR struct kwork_wqueue_s *)&g_lpwork;
      nthread = CONFIG_SCHED_LPNTHREADS;
    }
  else
#endif
    {
      return;
    }

  for (wndx = 0; wndx < nthread; wndx++)
    {
      handler(wqueue->worker[wndx].pid, arg);
    }
}
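
/* A minimal illustrative use of work_foreach(), assuming a hypothetical
 * handler named count_worker that is not part of this file:
 *
 *   static void count_worker(pid_t pid, FAR void *arg)
 *   {
 *     (*(FAR int *)arg)++;
 *   }
 *
 *   int nworkers = 0;
 *   work_foreach(LPWORK, count_worker, &nworkers);
 */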

/****************************************************************************
 * Name: work_start_highpri
 *
 * Description:
 *   Start the high-priority, kernel-mode worker thread(s)
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   A negated errno value is returned on failure.
 *
 ****************************************************************************/

#if defined(CONFIG_SCHED_HPWORK)
int work_start_highpri(void)
{
  /* Start the high-priority, kernel mode worker thread(s) */

  sinfo("Starting high-priority kernel worker thread(s)\n");

  return work_thread_create(HPWORKNAME, CONFIG_SCHED_HPWORKPRIORITY,
                            CONFIG_SCHED_HPWORKSTACKSIZE,
                            CONFIG_SCHED_HPNTHREADS,
                            (FAR struct kwork_wqueue_s *)&g_hpwork);
}
#endif /* CONFIG_SCHED_HPWORK */

/****************************************************************************
 * Name: work_start_lowpri
 *
 * Description:
 *   Start the low-priority, kernel-mode worker thread(s)
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   A negated errno value is returned on failure.
 *
 ****************************************************************************/

#if defined(CONFIG_SCHED_LPWORK)
int work_start_lowpri(void)
{
  /* Start the low-priority, kernel mode worker thread(s) */

  sinfo("Starting low-priority kernel worker thread(s)\n");

  return work_thread_create(LPWORKNAME, CONFIG_SCHED_LPWORKPRIORITY,
                            CONFIG_SCHED_LPWORKSTACKSIZE,
                            CONFIG_SCHED_LPNTHREADS,
                            (FAR struct kwork_wqueue_s *)&g_lpwork);
}
#endif /* CONFIG_SCHED_LPWORK */

#endif /* CONFIG_SCHED_WORKQUEUE */