diff --git a/arch/sim/src/sim/posix/sim_hostmemory.c b/arch/sim/src/sim/posix/sim_hostmemory.c
index 0c9da4c9d3..4677b70681 100644
--- a/arch/sim/src/sim/posix/sim_hostmemory.c
+++ b/arch/sim/src/sim/posix/sim_hostmemory.c
@@ -27,7 +27,6 @@
 #include
 #include
 #include
-#include <stdatomic.h>
 #include
 #include
@@ -45,9 +44,6 @@
  * Private Data
  ****************************************************************************/
 
-static atomic_int g_aordblks;
-static atomic_int g_uordblks;
-
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -175,31 +171,21 @@ void *host_memalign(size_t alignment, size_t size)
       return NULL;
     }
 
-  size = host_mallocsize(p);
-  atomic_fetch_add(&g_aordblks, 1);
-  atomic_fetch_add(&g_uordblks, size);
-
   return p;
 }
 
 void host_free(void *mem)
 {
-  size_t size;
-
   if (mem == NULL)
     {
       return;
     }
 
-  size = host_mallocsize(mem);
-  atomic_fetch_sub(&g_aordblks, 1);
-  atomic_fetch_sub(&g_uordblks, size);
   host_uninterruptible_no_return(free, mem);
 }
 
 void *host_realloc(void *oldmem, size_t size)
 {
-  size_t oldsize;
   void *mem;
 
   if (oldmem == NULL)
@@ -207,21 +193,6 @@ void *host_realloc(void *oldmem, size_t size)
       return host_memalign(sizeof(void *), size);
     }
 
-  oldsize = host_mallocsize(oldmem);
   mem = host_uninterruptible(realloc, oldmem, size);
-  if (mem == NULL)
-    {
-      return NULL;
-    }
-
-  size = host_mallocsize(mem);
-  atomic_fetch_add(&g_uordblks, size - oldsize);
-
   return mem;
 }
-
-void host_mallinfo(int *aordblks, int *uordblks)
-{
-  *aordblks = atomic_load(&g_aordblks);
-  *uordblks = atomic_load(&g_uordblks);
-}
diff --git a/arch/sim/src/sim/sim_heap.c b/arch/sim/src/sim/sim_heap.c
index e0260a7656..f7b3ea092c 100644
--- a/arch/sim/src/sim/sim_heap.c
+++ b/arch/sim/src/sim/sim_heap.c
@@ -26,6 +26,7 @@
 #include
 #include
+#include <stdatomic.h>
 #include
 #include
@@ -50,6 +51,9 @@ struct mm_delaynode_s
 struct mm_heap_s
 {
   struct mm_delaynode_s *mm_delaylist[CONFIG_SMP_NCPUS];
+  atomic_int aordblks;
+  atomic_int uordblks;
+  atomic_int usmblks;
 
 #if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMINFO)
   struct procfs_meminfo_entry_s mm_procfs;
@@ -227,6 +231,9 @@ void mm_free(struct mm_heap_s *heap, void *mem)
     }
   else
     {
+      int size = host_mallocsize(mem);
+      atomic_fetch_sub(&heap->aordblks, 1);
+      atomic_fetch_sub(&heap->uordblks, size);
       host_free(mem);
     }
 }
@@ -255,10 +262,40 @@
  ****************************************************************************/
 
 void *mm_realloc(struct mm_heap_s *heap, void *oldmem,
-                       size_t size)
+                 size_t size)
 {
+  void *mem;
+  int uordblks;
+  int usmblks;
+  int newsize;
+
   mm_free_delaylist(heap);
-  return host_realloc(oldmem, size);
+
+  if (size == 0)
+    {
+      mm_free(heap, oldmem);
+      return NULL;
+    }
+
+  atomic_fetch_sub(&heap->uordblks, host_mallocsize(oldmem));
+  mem = host_realloc(oldmem, size);
+
+  atomic_fetch_add(&heap->aordblks, oldmem == NULL && mem != NULL);
+  newsize = host_mallocsize(mem ? mem : oldmem);
+  atomic_fetch_add(&heap->uordblks, newsize);
+  usmblks = atomic_load(&heap->usmblks);
+
+  do
+    {
+      uordblks = atomic_load(&heap->uordblks);
+      if (uordblks <= usmblks)
+        {
+          break;
+        }
+    }
+  while (atomic_compare_exchange_weak(&heap->usmblks, &usmblks, uordblks));
+
+  return mem;
 }
 
 /****************************************************************************
@@ -315,11 +352,36 @@ void *mm_zalloc(struct mm_heap_s *heap, size_t size)
  *
  ****************************************************************************/
 
-void *mm_memalign(struct mm_heap_s *heap, size_t alignment,
-                  size_t size)
+void *mm_memalign(struct mm_heap_s *heap, size_t alignment, size_t size)
 {
+  void *mem;
+  int uordblks;
+  int usmblks;
+
   mm_free_delaylist(heap);
-  return host_memalign(alignment, size);
+  mem = host_memalign(alignment, size);
+
+  if (mem == NULL)
+    {
+      return NULL;
+    }
+
+  size = host_mallocsize(mem);
+  atomic_fetch_add(&heap->aordblks, 1);
+  atomic_fetch_add(&heap->uordblks, size);
+  usmblks = atomic_load(&heap->usmblks);
+
+  do
+    {
+      uordblks = atomic_load(&heap->uordblks);
+      if (uordblks <= usmblks)
+        {
+          break;
+        }
+    }
+  while (atomic_compare_exchange_weak(&heap->usmblks, &usmblks, uordblks));
+
+  return mem;
 }
 
 /****************************************************************************
@@ -385,7 +447,9 @@ struct mallinfo mm_mallinfo(struct mm_heap_s *heap)
   struct mallinfo info;
 
   memset(&info, 0, sizeof(struct mallinfo));
-  host_mallinfo(&info.aordblks, &info.uordblks);
+  info.aordblks = atomic_load(&heap->aordblks);
+  info.uordblks = atomic_load(&heap->uordblks);
+  info.usmblks = atomic_load(&heap->usmblks);
 
   return info;
 }
diff --git a/arch/sim/src/sim/sim_internal.h b/arch/sim/src/sim/sim_internal.h
index 50bb44219b..c5869bfd3c 100644
--- a/arch/sim/src/sim/sim_internal.h
+++ b/arch/sim/src/sim/sim_internal.h
@@ -225,7 +225,6 @@ size_t host_mallocsize(void *mem);
 void *host_memalign(size_t alignment, size_t size);
 void host_free(void *mem);
 void *host_realloc(void *oldmem, size_t size);
-void host_mallinfo(int *aordblks, int *uordblks);
 int host_unlinkshmem(const char *name);
 
 /* sim_hosttime.c ***********************************************************/
diff --git a/arch/sim/src/sim/win/sim_hostmemory.c b/arch/sim/src/sim/win/sim_hostmemory.c
index a94fecb2d7..b900b9792d 100644
--- a/arch/sim/src/sim/win/sim_hostmemory.c
+++ b/arch/sim/src/sim/win/sim_hostmemory.c
@@ -96,7 +96,3 @@ void *host_realloc(void *oldmem, size_t size)
 {
   return _aligned_realloc(oldmem, size, 8);
 }
-
-void host_mallinfo(int *aordblks, int *uordblks)
-{
-}
diff --git a/fs/procfs/fs_procfsmeminfo.c b/fs/procfs/fs_procfsmeminfo.c
index 35313ff490..aac6a8ab08 100644
--- a/fs/procfs/fs_procfsmeminfo.c
+++ b/fs/procfs/fs_procfsmeminfo.c
@@ -291,8 +291,9 @@ static ssize_t meminfo_read(FAR struct file *filep, FAR char *buffer,
   /* The first line is the headers */
 
   linesize = procfs_snprintf(procfile->line, MEMINFO_LINELEN,
-                             "%13s%11s%11s%11s%11s%7s%7s\n", "", "total",
-                             "used", "free", "largest", "nused", "nfree");
+                             "%13s%11s%11s%11s%11s%11s%7s%7s\n", "",
+                             "total", "used", "free", "maxused",
+                             "maxfree", "nused", "nfree");
 
   copysize = procfs_memcpy(procfile->line, linesize, buffer, buflen, &offset);
@@ -306,23 +307,24 @@
     {
       if (buflen > 0)
         {
-          struct mallinfo minfo;
+          struct mallinfo info;
 
           buffer += copysize;
           buflen -= copysize;
 
           /* Show heap information */
 
-          minfo = mm_mallinfo(entry->heap);
+          info = mm_mallinfo(entry->heap);
           linesize = procfs_snprintf(procfile->line, MEMINFO_LINELEN,
-                                     "%12s:%11lu%11lu%11lu%11lu%7lu%7lu\n",
-                                     entry->name,
-                                     (unsigned long)minfo.arena,
-                                     (unsigned long)minfo.uordblks,
-                                     (unsigned long)minfo.fordblks,
-                                     (unsigned long)minfo.mxordblk,
-                                     (unsigned long)minfo.aordblks,
-                                     (unsigned long)minfo.ordblks);
+                                     "%12s:%11lu%11lu%11lu%11lu%11lu"
+                                     "%7lu%7lu\n", entry->name,
+                                     (unsigned long)info.arena,
+                                     (unsigned long)info.uordblks,
+                                     (unsigned long)info.fordblks,
+                                     (unsigned long)info.usmblks,
+                                     (unsigned long)info.mxordblk,
+                                     (unsigned long)info.aordblks,
+                                     (unsigned long)info.ordblks);
           copysize = procfs_memcpy(procfile->line, linesize, buffer,
                                    buflen, &offset);
           totalsize += copysize;
diff --git a/include/malloc.h b/include/malloc.h
index 8ea7f2c966..aac2671b2e 100644
--- a/include/malloc.h
+++ b/include/malloc.h
@@ -68,6 +68,7 @@ struct mallinfo
                     * chunks handed out by malloc. */
   int fordblks;    /* This is the total size of memory occupied
                     * by free (not in use) chunks. */
+  int usmblks;     /* This is the largest amount of space ever allocated */
 };
 
 struct malltask
diff --git a/mm/mm_heap/mm.h b/mm/mm_heap/mm.h
index 163aa46818..f15a7d29ed 100644
--- a/mm/mm_heap/mm.h
+++ b/mm/mm_heap/mm.h
@@ -230,6 +230,14 @@ struct mm_heap_s
 
   size_t mm_heapsize;
 
+  /* This is the heap maximum used memory size */
+
+  size_t mm_maxused;
+
+  /* This is the current used size of the heap */
+
+  size_t mm_curused;
+
   /* This is the first and last nodes of the heap */
 
   FAR struct mm_allocnode_s *mm_heapstart[CONFIG_MM_REGIONS];
diff --git a/mm/mm_heap/mm_free.c b/mm/mm_heap/mm_free.c
index b41464da9f..d2f10fff77 100644
--- a/mm/mm_heap/mm_free.c
+++ b/mm/mm_heap/mm_free.c
@@ -121,6 +121,10 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
 
   node->size &= ~MM_ALLOC_BIT;
 
+  /* Update heap statistics */
+
+  heap->mm_curused -= nodesize;
+
   /* Check if the following node is free and, if so, merge it */
 
   next = (FAR struct mm_freenode_s *)((FAR char *)node + nodesize);
diff --git a/mm/mm_heap/mm_mallinfo.c b/mm/mm_heap/mm_mallinfo.c
index 84bc51718d..090dc93636 100644
--- a/mm/mm_heap/mm_mallinfo.c
+++ b/mm/mm_heap/mm_mallinfo.c
@@ -145,6 +145,7 @@ struct mallinfo mm_mallinfo(FAR struct mm_heap_s *heap)
   info.arena = heap->mm_heapsize;
   info.arena += sizeof(struct mm_heap_s);
   info.uordblks += sizeof(struct mm_heap_s);
+  info.usmblks = heap->mm_maxused + sizeof(struct mm_heap_s);
 
 #if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
   poolinfo = mempool_multiple_mallinfo(heap->mm_mpool);
diff --git a/mm/mm_heap/mm_malloc.c b/mm/mm_heap/mm_malloc.c
index df2624aba0..03691c4b1e 100644
--- a/mm/mm_heap/mm_malloc.c
+++ b/mm/mm_heap/mm_malloc.c
@@ -251,6 +251,14 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
           next->size &= ~MM_PREVFREE_BIT;
         }
 
+      /* Update heap statistics */
+
+      heap->mm_curused += MM_SIZEOF_NODE(node);
+      if (heap->mm_curused > heap->mm_maxused)
+        {
+          heap->mm_maxused = heap->mm_curused;
+        }
+
       /* Handle the case of an exact size match */
 
       node->size |= MM_ALLOC_BIT;
diff --git a/mm/mm_heap/mm_memalign.c b/mm/mm_heap/mm_memalign.c
index 0dc2651cb7..0e137cd91c 100644
--- a/mm/mm_heap/mm_memalign.c
+++ b/mm/mm_heap/mm_memalign.c
@@ -259,6 +259,14 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
       mm_shrinkchunk(heap, node, size);
     }
 
+  /* Update heap statistics */
+
+  heap->mm_curused += MM_SIZEOF_NODE(node);
+  if (heap->mm_curused > heap->mm_maxused)
+    {
+      heap->mm_maxused = heap->mm_curused;
+    }
+
   mm_unlock(heap);
 
   MM_ADD_BACKTRACE(heap, node);
diff --git a/mm/mm_heap/mm_realloc.c b/mm/mm_heap/mm_realloc.c
index d2b63f1756..12c95da0bc 100644
--- a/mm/mm_heap/mm_realloc.c
+++ b/mm/mm_heap/mm_realloc.c
@@ -365,6 +365,14 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
         }
     }
 
+  /* Update heap statistics */
+
+  heap->mm_curused += newsize - oldsize;
+  if (heap->mm_curused > heap->mm_maxused)
+    {
+      heap->mm_maxused = heap->mm_curused;
+    }
+
   mm_unlock(heap);
 
   MM_ADD_BACKTRACE(heap, (FAR char *)newmem - MM_SIZEOF_ALLOCNODE);
diff --git a/mm/tlsf/mm_tlsf.c b/mm/tlsf/mm_tlsf.c
index 0f93c2229f..1bf906a4c1 100644
--- a/mm/tlsf/mm_tlsf.c
+++ b/mm/tlsf/mm_tlsf.c
@@ -78,6 +78,14 @@ struct mm_heap_s
 
   size_t mm_heapsize;
 
+  /* This is the heap maximum used memory size */
+
+  size_t mm_maxused;
+
+  /* This is the current used size of the heap */
+
+  size_t mm_curused;
+
   /* This is the first and last of the heap */
 
   FAR void *mm_heapstart[CONFIG_MM_REGIONS];
@@ -698,6 +706,10 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
 
   kasan_poison(mem, mm_malloc_size(heap, mem));
 
+  /* Update heap statistics */
+
+  heap->mm_curused -= mm_malloc_size(heap, mem);
+
   /* Pass, return to the tlsf pool */
 
   tlsf_free(heap->mm_tlsf, mem);
@@ -891,6 +903,7 @@ struct mallinfo mm_mallinfo(FAR struct mm_heap_s *heap)
 
   info.arena = heap->mm_heapsize;
   info.uordblks = info.arena - info.fordblks;
+  info.usmblks = heap->mm_maxused;
 
 #if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
   poolinfo = mempool_multiple_mallinfo(heap->mm_mpool);
@@ -1061,6 +1074,12 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
   ret = tlsf_malloc(heap->mm_tlsf, size);
 #endif
 
+  heap->mm_curused += mm_malloc_size(heap, ret);
+  if (heap->mm_curused > heap->mm_maxused)
+    {
+      heap->mm_maxused = heap->mm_curused;
+    }
+
   mm_unlock(heap);
 
   if (ret)
@@ -1119,6 +1138,13 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
 #else
   ret = tlsf_memalign(heap->mm_tlsf, alignment, size);
 #endif
+
+  heap->mm_curused += mm_malloc_size(heap, ret);
+  if (heap->mm_curused > heap->mm_maxused)
+    {
+      heap->mm_maxused = heap->mm_curused;
+    }
+
   mm_unlock(heap);
 
   if (ret)
@@ -1217,12 +1243,20 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
   /* Allocate from the tlsf pool */
 
   DEBUGVERIFY(mm_lock(heap));
+  heap->mm_curused -= mm_malloc_size(heap, oldmem);
 #if CONFIG_MM_BACKTRACE >= 0
   newmem = tlsf_realloc(heap->mm_tlsf, oldmem,
                         size + sizeof(struct memdump_backtrace_s));
 #else
   newmem = tlsf_realloc(heap->mm_tlsf, oldmem, size);
 #endif
+
+  heap->mm_curused += mm_malloc_size(heap, newmem ? newmem : oldmem);
+  if (heap->mm_curused > heap->mm_maxused)
+    {
+      heap->mm_maxused = heap->mm_curused;
+    }
+
   mm_unlock(heap);
 
 #if CONFIG_MM_BACKTRACE >= 0
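
Note on the statistics scheme: the sim heap keeps aordblks/uordblks/usmblks as C11 atomics and raises the high-water mark with an atomic_compare_exchange_weak() retry loop, while the mm_heap and tlsf allocators update mm_curused/mm_maxused under the heap lock. The standalone sketch below is not part of the patch; it only illustrates the lock-free peak-tracking pattern in isolation, and the names current_usage, peak_usage and record_alloc are hypothetical.

/* Standalone sketch (not part of the patch) of the lock-free
 * peak-usage pattern that sim_heap.c uses for usmblks.
 */

#include <stdatomic.h>
#include <stdio.h>

static atomic_int current_usage;  /* Bytes currently allocated */
static atomic_int peak_usage;     /* High-water mark of current_usage */

static void record_alloc(int size)
{
  /* atomic_fetch_add() returns the old value, so add size to get the
   * total as seen by this thread's update.
   */

  int now = atomic_fetch_add(&current_usage, size) + size;
  int peak = atomic_load(&peak_usage);

  /* Raise the peak only while our total is higher.  On a failed CAS,
   * peak is reloaded with the value another thread stored, and the
   * loop exits as soon as that value is already >= now.
   */

  while (now > peak &&
         !atomic_compare_exchange_weak(&peak_usage, &peak, now))
    {
    }
}

int main(void)
{
  record_alloc(128);                     /* current 128, peak 128 */
  record_alloc(64);                      /* current 192, peak 192 */
  atomic_fetch_sub(&current_usage, 128); /* current 64,  peak 192 */
  record_alloc(32);                      /* current 96,  peak 192 */

  printf("current=%d peak=%d\n",
         atomic_load(&current_usage),
         atomic_load(&peak_usage));
  return 0;
}

The patched mm_realloc() in sim_heap.c applies the same retry loop after first subtracting the old block size from uordblks and adding back the size of whichever block survives the realloc.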