/****************************************************************************
 * net/iob/iob_alloc.c
 *
 *   Copyright (C) 2014, 2016 Gregory Nutt. All rights reserved.
 *   Author: Gregory Nutt <gnutt@nuttx.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name NuttX nor the names of its contributors may be
 *    used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#if defined(CONFIG_DEBUG_FEATURES) && defined(CONFIG_IOB_DEBUG)
/* Force debug output (from this file only) */

# undef CONFIG_DEBUG_NET
# define CONFIG_DEBUG_NET 1
#endif

#include <semaphore.h>
#include <assert.h>
#include <errno.h>

#include <nuttx/irq.h>
#include <nuttx/arch.h>
#include <nuttx/net/iob.h>

#include "iob.h"

/****************************************************************************
 * Private Functions
 ****************************************************************************/

/****************************************************************************
 * Name: iob_allocwait
 *
 * Description:
 *   Allocate an I/O buffer, waiting if necessary.  This function cannot
 *   be called from any interrupt level logic.
 *
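 * Input Parameters:
 *   throttled - Selects which counting semaphore is waited on: the
 *     throttled count (g_throttle_sem) if true, otherwise the full free
 *     count (g_iob_sem).  Only meaningful when CONFIG_IOB_THROTTLE > 0.
 *
 * Returned Value:
 *   A pointer to the allocated I/O buffer on success; NULL is returned
 *   only if the wait is terminated by an error other than EINTR.
 *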
 ****************************************************************************/

static FAR struct iob_s *iob_allocwait(bool throttled)
{
  FAR struct iob_s *iob;
  irqstate_t flags;
  FAR sem_t *sem;
  int ret = OK;

#if CONFIG_IOB_THROTTLE > 0
  /* Select the semaphore count to check. */

  sem = (throttled ? &g_throttle_sem : &g_iob_sem);
#else
  sem = &g_iob_sem;
#endif

  /* The following must be atomic; interrupts must be disabled so that there
   * is no conflict with interrupt level I/O buffer allocations.  This is
   * not as bad as it sounds because interrupts will be re-enabled while
   * we are waiting for I/O buffers to become free.
   */

  flags = enter_critical_section();
  do
    {
      /* Try to get an I/O buffer.  If successful, the semaphore count
       * will be decremented atomically.
       */

      iob = iob_tryalloc(throttled);
      if (!iob)
        {
          /* If not successful, then the semaphore count was less than or
           * equal to zero (meaning that there are no free buffers).  We
           * need to wait for an I/O buffer to be released, at which point
           * the semaphore count will be incremented.
           */

          ret = sem_wait(sem);
          if (ret < 0)
            {
              int errcode = get_errno();

              /* EINTR is not an error!  EINTR simply means that we were
               * awakened by a signal and we should try again.
               *
               * REVISIT: Many end-user interfaces are required to return
               * with an error if EINTR is set.  Most uses of this function
               * are in internal, non-user logic.  But are there cases where
               * the error should be returned?
               */

              if (errcode == EINTR)
                {
                  /* Force a success indication so that we will continue
                   * looping.
                   */

                  ret = 0;
                }
              else
                {
                  /* Stop the loop and return an error */

                  DEBUGASSERT(errcode > 0);
                  ret = -errcode;
                }
            }
          else
            {
              /* When we wake up from the wait successfully, an I/O buffer
               * was returned to the free list.  However, if there are
               * concurrent allocations from interrupt handling, then I
               * suspect that there is a race condition.  But no harm, we
               * will just wait again in that case.
               *
               * We need to release our count so that it is available to
               * iob_tryalloc(), perhaps allowing another thread to take our
               * count.  In that event, iob_tryalloc() will fail above and
               * we will have to wait again.
               *
               * TODO: Consider a design modification to permit us to
               * complete the allocation without losing our count.
               */

              sem_post(sem);
            }
        }
    }
  while (ret == OK && iob == NULL);

  leave_critical_section(flags);
  return iob;
}

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: iob_alloc
 *
 * Description:
 *   Allocate an I/O buffer by taking the buffer at the head of the free
 *   list.
 *
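 * Input Parameters:
 *   throttled - An indication that the allocation should be subject to
 *     the IOB throttle logic (see CONFIG_IOB_THROTTLE).
 *
 * Returned Value:
 *   A pointer to the allocated I/O buffer, or NULL on failure.  When
 *   called from the interrupt level, NULL is returned immediately if no
 *   buffer is available; otherwise this function may block until a
 *   buffer is freed.
 *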
 ****************************************************************************/

FAR struct iob_s *iob_alloc(bool throttled)
{
  /* Were we called from the interrupt level? */

  if (up_interrupt_context())
    {
      /* Yes, then try to allocate an I/O buffer without waiting */

      return iob_tryalloc(throttled);
    }
  else
    {
      /* No, then allocate an I/O buffer, waiting as necessary */

      return iob_allocwait(throttled);
    }
}

/****************************************************************************
 * Name: iob_tryalloc
 *
 * Description:
 *   Try to allocate an I/O buffer by taking the buffer at the head of the
 *   free list without waiting for a buffer to become free.
 *
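 * Input Parameters:
 *   throttled - If true and CONFIG_IOB_THROTTLE > 0, the allocation is
 *     checked against the throttled semaphore count so that
 *     CONFIG_IOB_THROTTLE buffers remain reserved for un-throttled
 *     allocations.
 *
 * Returned Value:
 *   A pointer to the allocated I/O buffer, or NULL if no buffer is
 *   immediately available.  This function does not block and may be
 *   called from interrupt level logic.
 *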
 ****************************************************************************/

FAR struct iob_s *iob_tryalloc(bool throttled)
{
  FAR struct iob_s *iob;
  irqstate_t flags;
#if CONFIG_IOB_THROTTLE > 0
  FAR sem_t *sem;
#endif

#if CONFIG_IOB_THROTTLE > 0
  /* Select the semaphore count to check. */

  sem = (throttled ? &g_throttle_sem : &g_iob_sem);
#endif

  /* We don't know what context we are called from, so we use extreme
   * measures to protect the free list:  We disable interrupts very briefly.
   */

  flags = enter_critical_section();

#if CONFIG_IOB_THROTTLE > 0
  /* If there are free I/O buffers for this allocation */

  if (sem->semcount > 0)
#endif
    {
      /* Take the I/O buffer from the head of the free list */

      iob = g_iob_freelist;
      if (iob)
        {
          /* Remove the I/O buffer from the free list and decrement the
           * counting semaphore(s) that track the number of available
           * IOBs.
           */

          g_iob_freelist = iob->io_flink;

          /* Take a semaphore count.  Note that we cannot do this in
           * the orthodox way by calling sem_wait() or sem_trywait()
           * because this function may be called from an interrupt
           * handler.  Fortunately we know that at least one free buffer
           * is available, so a simple decrement is all that is needed.
           */

          g_iob_sem.semcount--;
          DEBUGASSERT(g_iob_sem.semcount >= 0);

#if CONFIG_IOB_THROTTLE > 0
          /* The throttle semaphore is a little more complicated because
           * it can be negative!  Decrementing is still safe, however.
           */

          g_throttle_sem.semcount--;
          DEBUGASSERT(g_throttle_sem.semcount >= -CONFIG_IOB_THROTTLE);
#endif
          leave_critical_section(flags);

          /* Put the I/O buffer in a known state */

          iob->io_flink  = NULL; /* Not in a chain */
          iob->io_len    = 0;    /* Length of the data in the entry */
          iob->io_offset = 0;    /* Offset to the beginning of data */
          iob->io_pktlen = 0;    /* Total length of the packet */
          return iob;
        }
    }

  leave_critical_section(flags);
  return NULL;
}