2016-09-21 04:18:20 +08:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2016 Wind River Systems, Inc.
|
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
 * @file
 * @brief task_offload_to_fiber() function for legacy applications
 *
 * For legacy applications that need the task_offload_to_fiber()
 * function, this module implements it by means of a work queue.
 */
|
|
|
|
|
|
|
|
#include <kernel.h>
|
|
|
|
#include <nano_private.h>
|
2016-10-13 22:31:48 +08:00
|
|
|
#include <ksched.h>
|
2016-09-21 04:18:20 +08:00
|
|
|
#include <init.h>
|
|
|
|
|
|
|
|
/*
 * Per-request context: a work item plus the offloaded call and the thread
 * that should receive its result.  Instances live on the requesting
 * task's stack for the duration of the request (see task_offload_to_fiber()).
 */
struct offload_work {
	/* queued on offload_work_q; handler is offload_handler() */
	struct k_work work_item;
	/* legacy unprototyped function to run in workqueue context */
	int (*offload_func)();
	/* single argument passed through to offload_func */
	void *offload_args;
	/* requesting thread; its swap_data receives the return value */
	struct k_thread *thread;
};
|
|
|
|
|
|
|
|
/* Work queue that services all task_offload_to_fiber() requests */
static struct k_work_q offload_work_q;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Internal handler of the offload requests
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void offload_handler(struct k_work *work)
|
|
|
|
{
|
|
|
|
struct offload_work *offload =
|
|
|
|
CONTAINER_OF(work, struct offload_work, work_item);
|
|
|
|
int result = (offload->offload_func)(offload->offload_args);
|
|
|
|
unsigned int key = irq_lock();
|
|
|
|
|
|
|
|
offload->thread->swap_data = (void *)result;
|
|
|
|
irq_unlock(key);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * @brief Run @a func(@a argp) in the offload work queue and return its result.
 *
 * NOTE(review): correctness relies on the offload work queue thread
 * preempting this (preemptible) task as soon as the item is submitted, so
 * that by the time this task runs again, swap_data already holds the
 * result -- confirm CONFIG_OFFLOAD_WORKQUEUE_PRIORITY always outranks any
 * task that uses this API.
 *
 * @param func Function to execute in work queue context
 * @param argp Single argument passed to @a func
 * @return The value returned by @a func
 */
int task_offload_to_fiber(int (*func)(), void *argp)
{
	/*
	 * Create work in stack. Task is scheduled out and does not
	 * return until the work is consumed and complete, so the
	 * work item will exist until then.
	 */
	struct offload_work offload = {
		.offload_func = func,
		.offload_args = argp
	};

	/* Only a preemptible thread (task) may offload: a fiber could not
	 * be preempted by the work queue thread.
	 */
	__ASSERT(_is_preempt(_current), "Fiber is trying to offload work");

	k_work_init(&offload.work_item, offload_handler);

	/* Record where offload_handler() must deliver the result */
	offload.thread = _current;
	k_work_submit_to_queue(&offload_work_q, &offload.work_item);

	/* By now the handler has stored func's return value here */
	return (int)_current->swap_data;
}
|
|
|
|
|
|
|
|
/* Stack for the offload work queue thread; size comes from Kconfig */
static char __stack offload_work_q_stack[CONFIG_OFFLOAD_WORKQUEUE_STACK_SIZE];
|
|
|
|
|
|
|
|
/* Static thread configuration for the offload work queue */
static const struct k_thread_config offload_work_q_config = {
	.stack = offload_work_q_stack,
	.stack_size = sizeof(offload_work_q_stack),
	/* see NOTE in task_offload_to_fiber(): this priority must outrank
	 * every task that offloads work
	 */
	.prio = CONFIG_OFFLOAD_WORKQUEUE_PRIORITY,
};
|
|
|
|
|
|
|
|
static int k_offload_work_q_init(struct device *dev)
|
|
|
|
{
|
|
|
|
ARG_UNUSED(dev);
|
|
|
|
|
|
|
|
k_work_q_start(&offload_work_q, &offload_work_q_config);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Start the offload work queue during nanokernel-level initialization */
SYS_INIT(k_offload_work_q_init, NANOKERNEL,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
|