mempool: fix bug when using MM_BACKTRACE
Calculating the blk address in mempool_multiple_free has a bug: the real block size (block size plus backtrace overhead, aligned) is needed to calculate the memory address. Signed-off-by: anjiahao <anjiahao@xiaomi.com>
This commit is contained in:
parent
84059a949c
commit
4ced3c9642
|
@ -34,6 +34,18 @@
|
|||
#include <nuttx/spinlock.h>
|
||||
#include <nuttx/semaphore.h>
|
||||
|
||||
/****************************************************************************
|
||||
* Pre-processor Definitions
|
||||
****************************************************************************/
|
||||
|
||||
#if CONFIG_MM_BACKTRACE >= 0
|
||||
# define MEMPOOL_REALBLOCKSIZE(pool) (ALIGN_UP(pool->blocksize + \
|
||||
sizeof(struct mempool_backtrace_s), \
|
||||
pool->blockalign))
|
||||
#else
|
||||
# define MEMPOOL_REALBLOCKSIZE(pool) (pool->blocksize)
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Public Types
|
||||
****************************************************************************/
|
||||
|
@ -70,6 +82,9 @@ struct mempool_procfs_entry_s
|
|||
struct mempool_s
|
||||
{
|
||||
size_t blocksize; /* The size for every block in mempool */
|
||||
#if CONFIG_MM_BACKTRACE >= 0
|
||||
size_t blockalign; /* The alignment of the blocksize */
|
||||
#endif
|
||||
size_t initialsize; /* The initialize size in normal mempool */
|
||||
size_t interruptsize; /* The initialize size in interrupt mempool */
|
||||
size_t expandsize; /* The size of expand block every time for mempool */
|
||||
|
@ -101,6 +116,17 @@ struct mempool_s
|
|||
#endif
|
||||
};
|
||||
|
||||
#if CONFIG_MM_BACKTRACE >= 0
|
||||
struct mempool_backtrace_s
|
||||
{
|
||||
FAR struct list_node node;
|
||||
pid_t pid;
|
||||
# if CONFIG_MM_BACKTRACE > 0
|
||||
FAR void *backtrace[CONFIG_MM_BACKTRACE];
|
||||
# endif
|
||||
};
|
||||
#endif
|
||||
|
||||
struct mempoolinfo_s
|
||||
{
|
||||
unsigned long arena; /* This is the total size of mempool */
|
||||
|
|
|
@ -41,21 +41,6 @@
|
|||
#undef ALIGN_UP
|
||||
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))
|
||||
|
||||
/****************************************************************************
|
||||
* Private Types
|
||||
****************************************************************************/
|
||||
|
||||
#if CONFIG_MM_BACKTRACE >= 0
|
||||
struct mempool_backtrace_s
|
||||
{
|
||||
FAR struct list_node node;
|
||||
pid_t pid;
|
||||
# if CONFIG_MM_BACKTRACE > 0
|
||||
FAR void *backtrace[CONFIG_MM_BACKTRACE];
|
||||
# endif
|
||||
};
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Private Functions
|
||||
****************************************************************************/
|
||||
|
@ -143,13 +128,7 @@ static inline void mempool_add_backtrace(FAR struct mempool_s *pool,
|
|||
|
||||
int mempool_init(FAR struct mempool_s *pool, FAR const char *name)
|
||||
{
|
||||
#if CONFIG_MM_BACKTRACE >= 0
|
||||
size_t blocksize = ALIGN_UP(pool->blocksize +
|
||||
sizeof(struct mempool_backtrace_s),
|
||||
pool->blocksize);
|
||||
#else
|
||||
size_t blocksize = pool->blocksize;
|
||||
#endif
|
||||
size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
|
||||
|
||||
sq_init(&pool->queue);
|
||||
sq_init(&pool->iqueue);
|
||||
|
@ -262,13 +241,7 @@ retry:
|
|||
spin_unlock_irqrestore(&pool->lock, flags);
|
||||
if (pool->expandsize > sizeof(sq_entry_t))
|
||||
{
|
||||
#if CONFIG_MM_BACKTRACE >= 0
|
||||
size_t blocksize = ALIGN_UP(pool->blocksize +
|
||||
sizeof(struct mempool_backtrace_s),
|
||||
pool->blocksize);
|
||||
#else
|
||||
size_t blocksize = pool->blocksize;
|
||||
#endif
|
||||
size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
|
||||
size_t nexpand = (pool->expandsize - sizeof(sq_entry_t)) /
|
||||
blocksize;
|
||||
size_t size = nexpand * blocksize + sizeof(sq_entry_t);
|
||||
|
@ -326,17 +299,13 @@ out_with_lock:
|
|||
void mempool_free(FAR struct mempool_s *pool, FAR void *blk)
|
||||
{
|
||||
irqstate_t flags = spin_lock_irqsave(&pool->lock);
|
||||
size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
|
||||
#if CONFIG_MM_BACKTRACE >= 0
|
||||
size_t blocksize = ALIGN_UP(pool->blocksize +
|
||||
sizeof(struct mempool_backtrace_s),
|
||||
pool->blocksize);
|
||||
FAR struct mempool_backtrace_s *buf =
|
||||
(FAR struct mempool_backtrace_s *)((FAR char *)blk + pool->blocksize);
|
||||
|
||||
list_delete(&buf->node);
|
||||
#else
|
||||
size_t blocksize = pool->blocksize;
|
||||
|
||||
pool->nalloc--;
|
||||
#endif
|
||||
|
||||
|
@ -557,13 +526,7 @@ void mempool_memdump(FAR struct mempool_s *pool, pid_t pid)
|
|||
|
||||
int mempool_deinit(FAR struct mempool_s *pool)
|
||||
{
|
||||
#if CONFIG_MM_BACKTRACE >= 0
|
||||
size_t blocksize = ALIGN_UP(pool->blocksize +
|
||||
sizeof(struct mempool_backtrace_s),
|
||||
pool->blocksize);
|
||||
#else
|
||||
size_t blocksize = pool->blocksize;
|
||||
#endif
|
||||
size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
|
||||
FAR sq_entry_t *blk;
|
||||
size_t count = 0;
|
||||
|
||||
|
|
|
@ -328,7 +328,9 @@ mempool_multiple_init(FAR const char *name,
|
|||
pools[i].alloc = mempool_multiple_alloc_callback;
|
||||
pools[i].free = mempool_multiple_free_callback;
|
||||
pools[i].calibrate = calibrate;
|
||||
|
||||
#if CONFIG_MM_BACKTRACE >= 0
|
||||
pools[i].blockalign = mpool->minpoolsize;
|
||||
#endif
|
||||
ret = mempool_init(pools + i, name);
|
||||
if (ret < 0)
|
||||
{
|
||||
|
@ -509,7 +511,7 @@ int mempool_multiple_free(FAR struct mempool_multiple_s *mpool,
|
|||
|
||||
blk = (FAR char *)blk - (((FAR char *)blk -
|
||||
((FAR char *)dict->addr + mpool->minpoolsize)) %
|
||||
dict->pool->blocksize);
|
||||
MEMPOOL_REALBLOCKSIZE(dict->pool));
|
||||
mempool_free(dict->pool, blk);
|
||||
return 0;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue