mm: call sched_note within mm lock

Signed-off-by: xuxingliang <xuxingliang@xiaomi.com>
This commit is contained in:
xuxingliang 2024-09-04 10:38:51 +08:00 committed by Xiang Xiao
parent 5397a58731
commit bc9d6549e9
4 changed files with 32 additions and 18 deletions

View File

@@ -206,10 +206,9 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
mm_addfreechunk(heap, node);
heap->mm_curused += 2 * MM_SIZEOF_ALLOCNODE;
mm_unlock(heap);
sched_note_heap(NOTE_HEAP_ADD, heap, heapstart, heapsize,
heap->mm_curused);
mm_unlock(heap);
}
/****************************************************************************

View File

@@ -321,14 +321,19 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
}
DEBUGASSERT(ret == NULL || mm_heapmember(heap, ret));
if (ret)
{
sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
heap->mm_curused);
}
mm_unlock(heap);
if (ret)
{
MM_ADD_BACKTRACE(heap, node);
ret = kasan_unpoison(ret, nodesize - MM_ALLOCNODE_OVERHEAD);
sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
heap->mm_curused);
#ifdef CONFIG_MM_FILL_ALLOCATIONS
memset(ret, MM_ALLOC_MAGIC, alignsize - MM_ALLOCNODE_OVERHEAD);
#endif

View File

@@ -277,15 +277,15 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
heap->mm_maxused = heap->mm_curused;
}
sched_note_heap(NOTE_HEAP_ALLOC, heap, (FAR void *)alignedchunk, size,
heap->mm_curused);
mm_unlock(heap);
MM_ADD_BACKTRACE(heap, node);
alignedchunk = (uintptr_t)kasan_unpoison((FAR const void *)alignedchunk,
size - MM_ALLOCNODE_OVERHEAD);
sched_note_heap(NOTE_HEAP_ALLOC, heap, (FAR void *)alignedchunk, size,
heap->mm_curused);
DEBUGASSERT(alignedchunk % alignment == 0);
return (FAR void *)alignedchunk;
}

View File

@@ -699,10 +699,9 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
/* Add memory to the tlsf pool */
tlsf_add_pool(heap->mm_tlsf, heapstart, heapsize);
mm_unlock(heap);
sched_note_heap(NOTE_HEAP_ADD, heap, heapstart, heapsize,
heap->mm_curused);
mm_unlock(heap);
}
/****************************************************************************
@@ -1333,6 +1332,12 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
heap->mm_maxused = heap->mm_curused;
}
if (ret)
{
sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
heap->mm_curused);
}
mm_unlock(heap);
if (ret)
@@ -1344,8 +1349,6 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
#endif
ret = kasan_unpoison(ret, nodesize);
sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
heap->mm_curused);
#ifdef CONFIG_MM_FILL_ALLOCATIONS
memset(ret, 0xaa, nodesize);
@@ -1415,6 +1418,12 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
heap->mm_maxused = heap->mm_curused;
}
if (ret)
{
sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
heap->mm_curused);
}
mm_unlock(heap);
if (ret)
@@ -1425,8 +1434,6 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
memdump_backtrace(heap, buf);
#endif
ret = kasan_unpoison(ret, nodesize);
sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
heap->mm_curused);
}
#if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0
@@ -1545,6 +1552,14 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
heap->mm_maxused = heap->mm_curused;
}
if (newmem)
{
sched_note_heap(NOTE_HEAP_FREE, heap, oldmem, oldsize,
heap->mm_curused - newsize);
sched_note_heap(NOTE_HEAP_ALLOC, heap, newmem, newsize,
heap->mm_curused);
}
mm_unlock(heap);
if (newmem)
@@ -1553,11 +1568,6 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
FAR struct memdump_backtrace_s *buf = newmem + newsize;
memdump_backtrace(heap, buf);
#endif
sched_note_heap(NOTE_HEAP_FREE, heap, oldmem, oldsize,
heap->mm_curused - newsize);
sched_note_heap(NOTE_HEAP_ALLOC, heap, newmem, newsize,
heap->mm_curused);
}
#if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0