/* zephyr/kernel/unified/legacy_offload.c */

/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file @brief task_offload_to_fiber() function for legacy applications
*
* For the legacy applications that need task_offload_to_fiber() function,
* this module implements it by means of a work queue
*/
#include <kernel.h>
#include <nano_private.h>
#include <ksched.h>
#include <init.h>
/*
 * Bookkeeping for one offload request. Allocated on the requesting
 * thread's stack (see task_offload_to_fiber()), so it only stays valid
 * while that thread is blocked waiting for the request to complete.
 */
struct offload_work {
	struct k_work work_item;  /* item submitted to offload_work_q */
	int (*offload_func)();    /* function to run in work queue context */
	void *offload_args;       /* argument passed to offload_func */
	struct k_thread *thread;  /* requester; result is stored in its swap_data */
};
static struct k_work_q offload_work_q;
/*
 * Execute one offload request in work queue context.
 *
 * Invokes the offloaded function, then stores its return value in the
 * requesting thread's swap_data slot, from which task_offload_to_fiber()
 * retrieves it.
 */
static void offload_handler(struct k_work *work)
{
	struct offload_work *req = CONTAINER_OF(work, struct offload_work,
						work_item);
	int ret;
	unsigned int key;

	ret = req->offload_func(req->offload_args);

	/* Publish the result with interrupts locked */
	key = irq_lock();
	req->thread->swap_data = (void *)ret;
	irq_unlock(key);
}
int task_offload_to_fiber(int (*func)(), void *argp)
{
/*
* Create work in stack. Task is scheduled out and does not
* return until the work is consumed and complete, so the
* work item will exists until then.
*/
struct offload_work offload = {
.offload_func = func,
.offload_args = argp
};
__ASSERT(_is_preempt(_current), "Fiber is trying to offload work");
k_work_init(&offload.work_item, offload_handler);
offload.thread = _current;
k_work_submit_to_queue(&offload_work_q, &offload.work_item);
return (int)_current->swap_data;
}
static char __stack offload_work_q_stack[CONFIG_OFFLOAD_WORKQUEUE_STACK_SIZE];
/*
 * Init hook that spawns the offload work queue thread.
 *
 * @param dev Unused; required by the SYS_INIT driver-init signature
 *
 * @return 0 always
 */
static int k_offload_work_q_init(struct device *dev)
{
	ARG_UNUSED(dev);

	k_work_q_start(&offload_work_q,
		       offload_work_q_stack,
		       sizeof(offload_work_q_stack),
		       CONFIG_OFFLOAD_WORKQUEUE_PRIORITY);

	return 0;
}
/* Bring up the offload work queue during nanokernel-level initialization */
SYS_INIT(k_offload_work_q_init, NANOKERNEL,
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);