diff --git a/mm/mm_heap/mm_initialize.c b/mm/mm_heap/mm_initialize.c
index 4b884c4a05..61b1b91a6c 100644
--- a/mm/mm_heap/mm_initialize.c
+++ b/mm/mm_heap/mm_initialize.c
@@ -206,10 +206,9 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
 
   mm_addfreechunk(heap, node);
   heap->mm_curused += 2 * MM_SIZEOF_ALLOCNODE;
-  mm_unlock(heap);
-
   sched_note_heap(NOTE_HEAP_ADD, heap, heapstart, heapsize,
                   heap->mm_curused);
+  mm_unlock(heap);
 }
 
 /****************************************************************************
diff --git a/mm/mm_heap/mm_malloc.c b/mm/mm_heap/mm_malloc.c
index c8cd761e2b..18fae3511b 100644
--- a/mm/mm_heap/mm_malloc.c
+++ b/mm/mm_heap/mm_malloc.c
@@ -321,14 +321,19 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
     }
 
   DEBUGASSERT(ret == NULL || mm_heapmember(heap, ret));
+
+  if (ret)
+    {
+      sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
+                      heap->mm_curused);
+    }
+
   mm_unlock(heap);
 
   if (ret)
     {
       MM_ADD_BACKTRACE(heap, node);
       ret = kasan_unpoison(ret, nodesize - MM_ALLOCNODE_OVERHEAD);
-      sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
-                      heap->mm_curused);
 #ifdef CONFIG_MM_FILL_ALLOCATIONS
       memset(ret, MM_ALLOC_MAGIC, alignsize - MM_ALLOCNODE_OVERHEAD);
 #endif
diff --git a/mm/mm_heap/mm_memalign.c b/mm/mm_heap/mm_memalign.c
index cdf6338b1e..060b1f6f17 100644
--- a/mm/mm_heap/mm_memalign.c
+++ b/mm/mm_heap/mm_memalign.c
@@ -277,15 +277,15 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
       heap->mm_maxused = heap->mm_curused;
     }
 
+  sched_note_heap(NOTE_HEAP_ALLOC, heap, (FAR void *)alignedchunk, size,
+                  heap->mm_curused);
+
   mm_unlock(heap);
 
   MM_ADD_BACKTRACE(heap, node);
 
   alignedchunk = (uintptr_t)kasan_unpoison((FAR const void *)alignedchunk,
                                            size - MM_ALLOCNODE_OVERHEAD);
-  sched_note_heap(NOTE_HEAP_ALLOC, heap, (FAR void *)alignedchunk, size,
-                  heap->mm_curused);
-
   DEBUGASSERT(alignedchunk % alignment == 0);
   return (FAR void *)alignedchunk;
 }
diff --git a/mm/tlsf/mm_tlsf.c b/mm/tlsf/mm_tlsf.c
index 816d5e0973..f97022d4a3 100644
--- a/mm/tlsf/mm_tlsf.c
+++ b/mm/tlsf/mm_tlsf.c
@@ -699,10 +699,9 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
   /* Add memory to the tlsf pool */
 
   tlsf_add_pool(heap->mm_tlsf, heapstart, heapsize);
-  mm_unlock(heap);
-
   sched_note_heap(NOTE_HEAP_ADD, heap, heapstart, heapsize,
                   heap->mm_curused);
+  mm_unlock(heap);
 }
 
 /****************************************************************************
@@ -1333,6 +1332,12 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
       heap->mm_maxused = heap->mm_curused;
     }
 
+  if (ret)
+    {
+      sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
+                      heap->mm_curused);
+    }
+
   mm_unlock(heap);
 
   if (ret)
@@ -1344,8 +1349,6 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
 #endif
 
       ret = kasan_unpoison(ret, nodesize);
-      sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
-                      heap->mm_curused);
 
 #ifdef CONFIG_MM_FILL_ALLOCATIONS
       memset(ret, 0xaa, nodesize);
@@ -1415,6 +1418,12 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
       heap->mm_maxused = heap->mm_curused;
     }
 
+  if (ret)
+    {
+      sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
+                      heap->mm_curused);
+    }
+
   mm_unlock(heap);
 
   if (ret)
@@ -1425,8 +1434,6 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
       memdump_backtrace(heap, buf);
 #endif
       ret = kasan_unpoison(ret, nodesize);
-      sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
-                      heap->mm_curused);
     }
 
 #if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0
@@ -1545,6 +1552,14 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
       heap->mm_maxused = heap->mm_curused;
     }
 
+  if (newmem)
+    {
+      sched_note_heap(NOTE_HEAP_FREE, heap, oldmem, oldsize,
+                      heap->mm_curused - newsize);
+      sched_note_heap(NOTE_HEAP_ALLOC, heap, newmem, newsize,
+                      heap->mm_curused);
+    }
+
   mm_unlock(heap);
 
   if (newmem)
@@ -1553,11 +1568,6 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
       FAR struct memdump_backtrace_s *buf = newmem + newsize;
       memdump_backtrace(heap, buf);
 #endif
-
-      sched_note_heap(NOTE_HEAP_FREE, heap, oldmem, oldsize,
-                      heap->mm_curused - newsize);
-      sched_note_heap(NOTE_HEAP_ALLOC, heap, newmem, newsize,
-                      heap->mm_curused);
     }
 
 #if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0