mm/damon/core: make damon_start() waits until kdamond_fn() starts
commit 6376a824595607e99d032a39ba3394988b4fce96 upstream. The cleanup tasks of kdamond threads, including the reset of the corresponding DAMON context's ->kdamond field and the decrease of the global nr_running_ctxs counter, are supposed to be executed by kdamond_fn(). However, commit 0f91d13366
("mm/damon: simplify stop mechanism") made neither damon_start() nor damon_stop() ensure that the corresponding kdamond has started executing kdamond_fn(). As a result, the cleanup can be skipped if damon_stop() is called quickly enough after the preceding damon_start(). In particular, the skipped reset of ->kdamond could cause a use-after-free. Fix it by waiting for the start of kdamond_fn() execution from damon_start(). Link: https://lkml.kernel.org/r/20231208175018.63880-1-sj@kernel.org Fixes: 0f91d13366
("mm/damon: simplify stop mechanism") Signed-off-by: SeongJae Park <sj@kernel.org> Reported-by: Jakub Acs <acsjakub@amazon.de> Cc: Changbin Du <changbin.du@intel.com> Cc: Jakub Acs <acsjakub@amazon.de> Cc: <stable@vger.kernel.org> # 5.15.x Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: SeongJae Park <sj@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
41f4ff9fe2
commit
ec7b81b0ab
|
@ -8,6 +8,7 @@
|
||||||
#ifndef _DAMON_H_
|
#ifndef _DAMON_H_
|
||||||
#define _DAMON_H_
|
#define _DAMON_H_
|
||||||
|
|
||||||
|
#include <linux/completion.h>
|
||||||
#include <linux/mutex.h>
|
#include <linux/mutex.h>
|
||||||
#include <linux/time64.h>
|
#include <linux/time64.h>
|
||||||
#include <linux/types.h>
|
#include <linux/types.h>
|
||||||
|
@ -452,6 +453,8 @@ struct damon_ctx {
|
||||||
/* private: internal use only */
|
/* private: internal use only */
|
||||||
struct timespec64 last_aggregation;
|
struct timespec64 last_aggregation;
|
||||||
struct timespec64 last_ops_update;
|
struct timespec64 last_ops_update;
|
||||||
|
/* for waiting until the execution of the kdamond_fn is started */
|
||||||
|
struct completion kdamond_started;
|
||||||
|
|
||||||
/* public: */
|
/* public: */
|
||||||
struct task_struct *kdamond;
|
struct task_struct *kdamond;
|
||||||
|
|
|
@ -383,6 +383,8 @@ struct damon_ctx *damon_new_ctx(void)
|
||||||
if (!ctx)
|
if (!ctx)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
init_completion(&ctx->kdamond_started);
|
||||||
|
|
||||||
ctx->attrs.sample_interval = 5 * 1000;
|
ctx->attrs.sample_interval = 5 * 1000;
|
||||||
ctx->attrs.aggr_interval = 100 * 1000;
|
ctx->attrs.aggr_interval = 100 * 1000;
|
||||||
ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
|
ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
|
||||||
|
@ -519,11 +521,14 @@ static int __damon_start(struct damon_ctx *ctx)
|
||||||
mutex_lock(&ctx->kdamond_lock);
|
mutex_lock(&ctx->kdamond_lock);
|
||||||
if (!ctx->kdamond) {
|
if (!ctx->kdamond) {
|
||||||
err = 0;
|
err = 0;
|
||||||
|
reinit_completion(&ctx->kdamond_started);
|
||||||
ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
|
ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
|
||||||
nr_running_ctxs);
|
nr_running_ctxs);
|
||||||
if (IS_ERR(ctx->kdamond)) {
|
if (IS_ERR(ctx->kdamond)) {
|
||||||
err = PTR_ERR(ctx->kdamond);
|
err = PTR_ERR(ctx->kdamond);
|
||||||
ctx->kdamond = NULL;
|
ctx->kdamond = NULL;
|
||||||
|
} else {
|
||||||
|
wait_for_completion(&ctx->kdamond_started);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
mutex_unlock(&ctx->kdamond_lock);
|
mutex_unlock(&ctx->kdamond_lock);
|
||||||
|
@ -1147,6 +1152,8 @@ static int kdamond_fn(void *data)
|
||||||
|
|
||||||
pr_debug("kdamond (%d) starts\n", current->pid);
|
pr_debug("kdamond (%d) starts\n", current->pid);
|
||||||
|
|
||||||
|
complete(&ctx->kdamond_started);
|
||||||
|
|
||||||
if (ctx->ops.init)
|
if (ctx->ops.init)
|
||||||
ctx->ops.init(ctx);
|
ctx->ops.init(ctx);
|
||||||
if (ctx->callback.before_start && ctx->callback.before_start(ctx))
|
if (ctx->callback.before_start && ctx->callback.before_start(ctx))
|
||||||
|
|
Loading…
Reference in New Issue