incubator-nuttx/mm/mm_heap/mm_free.c

/****************************************************************************
* mm/mm_heap/mm_free.c
*
* Copyright (C) 2007, 2009, 2013-2014 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <assert.h>
#include <debug.h>

#include <nuttx/arch.h>
#include <nuttx/mm/mm.h>

/****************************************************************************
 * Private Functions
 ****************************************************************************/
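
/****************************************************************************
 * Name: mm_add_delaylist
 *
 * Description:
 *   Defer freeing of a memory chunk by pushing it onto the heap's
 *   mm_delaylist.  The list is updated inside a critical section so it is
 *   safe to use from interrupt context; the queued chunks are freed later
 *   when the heap can be locked normally.
 *
 ****************************************************************************/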

#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
static void mm_add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
{
  FAR struct mm_delaynode_s *tmp = mem;
  irqstate_t flags;

  /* Delay the deallocation until a more appropriate time. */

  flags = enter_critical_section();

  tmp->flink = heap->mm_delaylist;
  heap->mm_delaylist = tmp;

  leave_critical_section(flags);
}
#endif

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: mm_free
 *
 * Description:
 *   Returns a chunk of memory to the list of free nodes, merging with
 *   adjacent free chunks if possible.
 *
 ****************************************************************************/

void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
{
  FAR struct mm_freenode_s *node;
  FAR struct mm_freenode_s *prev;
  FAR struct mm_freenode_s *next;
  int ret;

  UNUSED(ret);
  minfo("Freeing %p\n", mem);

  /* Protect against attempts to free a NULL reference */

  if (!mem)
    {
      return;
    }

#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
  /* Check the current execution environment */

  if (up_interrupt_context())
    {
      /* We are in an interrupt handler; add the chunk to mm_delaylist */

      mm_add_delaylist(heap, mem);
      return;
    }
  else if ((ret = mm_trysemaphore(heap)) == 0)
    {
      /* We got the semaphore, so do the free immediately */
    }
  else if (ret == -ESRCH || sched_idletask())
    {
      /* Either mm_trysemaphore() returned -ESRCH, meaning that we are in
       * the middle of a context switch (see mm_trysemaphore() and
       * getpid()), or we are the IDLE task and cannot wait for the
       * semaphore.  In either case, add the chunk to mm_delaylist.
       */

      mm_add_delaylist(heap, mem);
      return;
    }
  else
#endif
    {
      /* We need to hold the MM semaphore while we muck with the
       * nodelist.
       */

      mm_takesemaphore(heap);
    }
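
  /* Verify that the memory being freed actually belongs to this heap */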

  DEBUGASSERT(mm_heapmember(heap, mem));

  /* Map the memory chunk into a free node */

  node = (FAR struct mm_freenode_s *)((FAR char *)mem - SIZEOF_MM_ALLOCNODE);

  /* Sanity check against double-frees */

  DEBUGASSERT(node->preceding & MM_ALLOC_BIT);
  node->preceding &= ~MM_ALLOC_BIT;

  /* Check if the following node is free and, if so, merge it */

  next = (FAR struct mm_freenode_s *)((FAR char *)node + node->size);
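
  /* The size recorded in the following node's 'preceding' field (ignoring
   * the MM_ALLOC_BIT flag) must match this node's size.
   */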

  DEBUGASSERT((next->preceding & ~MM_ALLOC_BIT) == node->size);
  if ((next->preceding & MM_ALLOC_BIT) == 0)
    {
      FAR struct mm_allocnode_s *andbeyond;

      /* Get the node following the next node (which will become the new
       * next node).  We know that we can never index past the tail chunk
       * because it is always allocated.
       */

      andbeyond = (FAR struct mm_allocnode_s *)
                  ((FAR char *)next + next->size);

      /* Remove the next node.  There must be a predecessor, but there may
       * not be a successor node.
       */

      DEBUGASSERT(next->blink);
      next->blink->flink = next->flink;
      if (next->flink)
        {
          next->flink->blink = next->blink;
        }

      /* Then merge the two chunks */

      node->size += next->size;
      andbeyond->preceding = node->size |
                             (andbeyond->preceding & MM_ALLOC_BIT);
      next = (FAR struct mm_freenode_s *)andbeyond;
    }

  /* Check if the preceding node is also free and, if so, merge it with
   * this node.
   */
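
  /* node->preceding holds the size of the physically preceding chunk, so
   * stepping back by that many bytes locates its header.
   */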

  prev = (FAR struct mm_freenode_s *)((FAR char *)node - node->preceding);
  DEBUGASSERT((node->preceding & ~MM_ALLOC_BIT) == prev->size);
  if ((prev->preceding & MM_ALLOC_BIT) == 0)
    {
      /* Remove the node.  There must be a predecessor, but there may
       * not be a successor node.
       */

      DEBUGASSERT(prev->blink);
      prev->blink->flink = prev->flink;
      if (prev->flink)
        {
          prev->flink->blink = prev->blink;
        }

      /* Then merge the two chunks */

      prev->size += node->size;
      next->preceding = prev->size | (next->preceding & MM_ALLOC_BIT);
      node = prev;
    }

  /* Add the merged node to the nodelist */

  mm_addfreechunk(heap, node);
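
  /* The free is complete; release the MM semaphore */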

  mm_givesemaphore(heap);
}