mm_heap/backtrace: move MM_ADD_BACKTRACE out of heap lock

Move MM_ADD_BACKTRACE out of the heap lock to improve performance.  The
backtrace is now recorded after mm_givesemaphore() has released the heap,
so the critical section no longer includes the backtrace capture.

Signed-off-by: chao.an <anchao@xiaomi.com>
Author:    chao.an <anchao@xiaomi.com>
Date:      2022-08-01 15:59:37 +08:00
Committer: Xiang Xiao
Commit:    5db0ab1e6a (parent 87cb224b1c)

3 changed files with 8 additions and 6 deletions
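For context, a minimal sketch (not part of the change) of the pattern this commit applies: the bookkeeping is moved outside the critical section so that only the shared heap state is updated under the lock. The toy bump allocator, the pthread mutex, and record_backtrace() below are illustrative stand-ins for the NuttX heap, its semaphore, and MM_ADD_BACKTRACE; none of this is NuttX code.

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t g_heap_lock = PTHREAD_MUTEX_INITIALIZER;
static char g_heap[4096];
static size_t g_brk;

static void record_backtrace(void *node)
{
  (void)node; /* capture and store a call trace for the owner of 'node' */
}

/* Before: the backtrace is captured while the lock is held, so every other
 * allocator caller also waits for the capture to finish.
 */

static void *alloc_before(size_t size)
{
  void *node = NULL;

  pthread_mutex_lock(&g_heap_lock);
  if (g_brk + size <= sizeof g_heap)
    {
      node = &g_heap[g_brk];
      g_brk += size;
      record_backtrace(node);   /* extra work inside the critical section */
    }

  pthread_mutex_unlock(&g_heap_lock);
  return node;
}

/* After: only the allocation bookkeeping shared between threads is
 * serialized; the backtrace is recorded once the lock is released, when the
 * new node belongs to the calling thread alone.
 */

static void *alloc_after(size_t size)
{
  void *node = NULL;

  pthread_mutex_lock(&g_heap_lock);
  if (g_brk + size <= sizeof g_heap)
    {
      node = &g_heap[g_brk];
      g_brk += size;
    }

  pthread_mutex_unlock(&g_heap_lock);

  if (node != NULL)
    {
      record_backtrace(node);   /* same work, now outside the lock */
    }

  return node;
}

The diffs below apply the same move in mm_malloc(), mm_memalign(), and mm_realloc().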

@@ -229,7 +229,6 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
       /* Handle the case of an exact size match */
 
       node->preceding |= MM_ALLOC_BIT;
-      MM_ADD_BACKTRACE(heap, node);
       ret = (FAR void *)((FAR char *)node + SIZEOF_MM_ALLOCNODE);
     }
@@ -238,6 +237,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
   if (ret)
     {
+      MM_ADD_BACKTRACE(heap, node);
       kasan_unpoison(ret, mm_malloc_size(ret));
 
 #ifdef CONFIG_MM_FILL_ALLOCATIONS
       memset(ret, 0xaa, alignsize - SIZEOF_MM_ALLOCNODE);

@@ -178,7 +178,6 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
       newnode->size = (size_t)next - (size_t)newnode;
       newnode->preceding = precedingsize | MM_ALLOC_BIT;
-      MM_ADD_BACKTRACE(heap, newnode);
 
       /* Reduce the size of the original chunk and mark it not allocated, */
@@ -224,6 +223,8 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
   mm_givesemaphore(heap);
 
+  MM_ADD_BACKTRACE(heap, node);
+
   kasan_unpoison((FAR void *)alignedchunk,
                  mm_malloc_size((FAR void *)alignedchunk));

@@ -128,11 +128,12 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
                        oldsize - oldnode->size);
         }
 
-      MM_ADD_BACKTRACE(heap, oldnode);
-
       /* Then return the original address */
 
       mm_givesemaphore(heap);
+
+      MM_ADD_BACKTRACE(heap, oldnode);
 
       return oldmem;
     }
@@ -334,10 +335,10 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
             }
         }
 
-      MM_ADD_BACKTRACE(heap, (FAR char *)newmem - SIZEOF_MM_ALLOCNODE);
-
       mm_givesemaphore(heap);
+
+      MM_ADD_BACKTRACE(heap, (FAR char *)newmem - SIZEOF_MM_ALLOCNODE);
 
       kasan_unpoison(newmem, mm_malloc_size(newmem));
 
       if (newmem != oldmem)
         {