blk-cgroup: synchronize pd_free_fn() from blkg_free_workfn() and blkcg_deactivate_policy()
Currently the parent pd can be freed before the child pd:

t1: remove cgroup C1
blkcg_destroy_blkgs
 blkg_destroy
  list_del_init(&blkg->q_node)
  // remove blkg from queue list
  percpu_ref_kill(&blkg->refcnt)
   blkg_release
    call_rcu

t2: from t1
__blkg_release
 blkg_free
  schedule_work

t4: deactivate policy
blkcg_deactivate_policy
 pd_free_fn
 // parent of C1 is freed first

t3: from t2
blkg_free_workfn
 pd_free_fn
 // child of C1 is freed last

If a policy (for example, ioc_timer_fn() from iocost) accesses the parent
pd from the child pd after pd_offline_fn(), a use-after-free can be
triggered.

Fix the problem by delaying 'list_del_init(&blkg->q_node)' from
blkg_destroy() to blkg_free_workfn(), and by using a new disk-level mutex
to synchronize blkg_free_workfn() and blkcg_deactivate_policy().

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Acked-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230119110350.2287325-4-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit f1c006f1c6
parent dfd6200a09
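Before the diff itself, here is a minimal userspace sketch of the synchronization pattern the commit message above describes: both free paths serialize on one per-queue mutex, and an object stays on the queue list until its policy data has been freed. This is only an analogue of the kernel change below, not the kernel code; every toy_* name is invented for the illustration, and a plain pthread mutex stands in for the new q->blkcg_mutex.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for blkg_policy_data / blkcg_gq / request_queue. */
struct toy_pd { int policy_data; };

struct toy_blkg {
	struct toy_blkg *next;          /* plays the role of blkg->q_node */
	struct toy_pd *pd;              /* plays the role of blkg->pd[i]  */
};

struct toy_queue {
	pthread_mutex_t blkcg_mutex;    /* plays the role of q->blkcg_mutex */
	struct toy_blkg *blkg_list;     /* plays the role of q->blkg_list   */
};

/* Deferred free path, modelled on blkg_free_workfn(): free the policy data
 * and only then unlink the blkg, all under blkcg_mutex. */
static void toy_blkg_free_workfn(struct toy_queue *q, struct toy_blkg *blkg)
{
	struct toy_blkg **pp;

	pthread_mutex_lock(&q->blkcg_mutex);
	free(blkg->pd);                 /* pd_free_fn(); NULL if already freed */
	blkg->pd = NULL;

	/* the delayed list_del_init(&blkg->q_node) */
	for (pp = &q->blkg_list; *pp; pp = &(*pp)->next)
		if (*pp == blkg) {
			*pp = blkg->next;
			break;
		}
	pthread_mutex_unlock(&q->blkcg_mutex);
	free(blkg);
}

/* Policy teardown, modelled on blkcg_deactivate_policy(): under the same
 * mutex, free the policy data of every blkg still on the list. */
static void toy_deactivate_policy(struct toy_queue *q)
{
	struct toy_blkg *blkg;

	pthread_mutex_lock(&q->blkcg_mutex);
	for (blkg = q->blkg_list; blkg; blkg = blkg->next) {
		free(blkg->pd);
		blkg->pd = NULL;
	}
	pthread_mutex_unlock(&q->blkcg_mutex);
}

int main(void)
{
	static struct toy_queue q = { .blkcg_mutex = PTHREAD_MUTEX_INITIALIZER };
	struct toy_blkg *blkg = calloc(1, sizeof(*blkg));

	blkg->pd = calloc(1, sizeof(*blkg->pd));
	q.blkg_list = blkg;

	/* The ordering that used to be the bug: policy teardown first, the
	 * deferred per-blkg free afterwards.  With both paths serialized on
	 * blkcg_mutex and the list unlink delayed, it is now well defined. */
	toy_deactivate_policy(&q);
	toy_blkg_free_workfn(&q, blkg);
	printf("freed in a well-defined order, list now %s\n",
	       q.blkg_list ? "non-empty" : "empty");
	return 0;
}

Build with cc -pthread. The later free path finds the pd already cleared, so nothing is freed twice, and only that path removes the blkg from the list and frees it.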
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -118,16 +118,32 @@ static void blkg_free_workfn(struct work_struct *work)
 {
 	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
 					     free_work);
+	struct request_queue *q = blkg->q;
 	int i;
 
+	/*
+	 * pd_free_fn() can also be called from blkcg_deactivate_policy(),
+	 * in order to make sure pd_free_fn() is called in order, the deletion
+	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
+	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
+	 * blkcg_deactivate_policy().
+	 */
+	if (q)
+		mutex_lock(&q->blkcg_mutex);
+
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
 	if (blkg->parent)
 		blkg_put(blkg->parent);
-	if (blkg->q)
-		blk_put_queue(blkg->q);
+	if (q) {
+		list_del_init(&blkg->q_node);
+		mutex_unlock(&q->blkcg_mutex);
+		blk_put_queue(q);
+	}
+
 	free_percpu(blkg->iostat_cpu);
 	percpu_ref_exit(&blkg->refcnt);
 	kfree(blkg);
@@ -462,9 +478,14 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 	lockdep_assert_held(&blkg->q->queue_lock);
 	lockdep_assert_held(&blkcg->lock);
 
-	/* Something wrong if we are trying to remove same group twice */
-	WARN_ON_ONCE(list_empty(&blkg->q_node));
-	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
+	/*
+	 * blkg stays on the queue list until blkg_free_workfn(), see details in
+	 * blkg_free_workfn(), hence this function can be called from
+	 * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
+	 * blkg_free_workfn().
+	 */
+	if (hlist_unhashed(&blkg->blkcg_node))
+		return;
 
 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 		struct blkcg_policy *pol = blkcg_policy[i];
@@ -479,7 +500,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 	blkg->online = false;
 
 	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
-	list_del_init(&blkg->q_node);
 	hlist_del_init_rcu(&blkg->blkcg_node);
 
 	/*
@@ -1280,6 +1300,7 @@ int blkcg_init_disk(struct gendisk *disk)
 	int ret;
 
 	INIT_LIST_HEAD(&q->blkg_list);
+	mutex_init(&q->blkcg_mutex);
 
 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
 	if (!new_blkg)
@@ -1520,6 +1541,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 
+	mutex_lock(&q->blkcg_mutex);
 	spin_lock_irq(&q->queue_lock);
 
 	__clear_bit(pol->plid, q->blkcg_pols);
@@ -1538,6 +1560,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	}
 
 	spin_unlock_irq(&q->queue_lock);
+	mutex_unlock(&q->blkcg_mutex);
 
 	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -485,6 +485,7 @@ struct request_queue {
 	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
 	struct blkcg_gq		*root_blkg;
 	struct list_head	blkg_list;
+	struct mutex		blkcg_mutex;
 #endif
 
 	struct queue_limits	limits;
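One more consequence, spelled out in the new comment in the blkg_destroy() hunk above: since a blkg now stays on q->blkg_list until blkg_free_workfn(), blkg_destroy() may be reached twice, first from blkcg_destroy_blkgs() and again from blkg_destroy_all(), and the new hlist_unhashed() check turns the second call into a no-op. Below is a minimal sketch of that idempotent-destroy guard, with invented toy_* names and a plain boolean in place of the real hlist state.

#include <stdbool.h>
#include <stdio.h>

/* 'hashed' plays the role of blkg->blkcg_node still being hashed;
 * toy_destroy() mirrors the new early return in blkg_destroy(). */
struct toy_node {
	bool hashed;
	int offlined;          /* counts how many times teardown really ran */
};

static void toy_destroy(struct toy_node *node)
{
	if (!node->hashed)     /* like: if (hlist_unhashed(&blkg->blkcg_node)) */
		return;        /*           return;                            */

	node->hashed = false;  /* like hlist_del_init_rcu(&blkg->blkcg_node)   */
	node->offlined++;      /* the real teardown work runs exactly once     */
}

int main(void)
{
	struct toy_node n = { .hashed = true };

	toy_destroy(&n);       /* e.g. from blkcg_destroy_blkgs()    */
	toy_destroy(&n);       /* e.g. again from blkg_destroy_all() */
	printf("teardown ran %d time(s)\n", n.offlined);   /* prints 1 */
	return 0;
}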