ubi: Fix UAF wear-leveling entry in eraseblk_count_seq_show()

[ Upstream commit a240bc5c43 ]

A wear-leveling entry can be freed in an error path and then accessed
again in eraseblk_count_seq_show(), for example:

__erase_worker                       eraseblk_count_seq_show
                                       wl = ubi->lookuptbl[*block_number]
                                       if (wl)
  wl_entry_destroy
    ubi->lookuptbl[e->pnum] = NULL
    kmem_cache_free(ubi_wl_entry_slab, e)
                                         erase_count = wl->ec  // UAF!
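
For context, a simplified sketch of the two sides of the race, condensed
from drivers/mtd/ubi/wl.c and drivers/mtd/ubi/debug.c (surrounding error
handling trimmed; the reader is assumed to take ubi->wl_lock around the
lookup, as the debugfs code does). The reader's lock alone cannot help,
because wl_entry_destroy() runs without it in the erase error paths:

  /* Destroy side, wl.c: reached from erase error paths without
   * holding ubi->wl_lock before this fix. */
  static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
  {
          ubi->lookuptbl[e->pnum] = NULL;
          kmem_cache_free(ubi_wl_entry_slab, e);
  }

  /* Reader side, debug.c (condensed): the lock only serializes against
   * writers that also take it. */
  spin_lock(&ubi->wl_lock);
  wl = ubi->lookuptbl[*block_number];
  if (wl)
          erase_count = wl->ec;
  spin_unlock(&ubi->wl_lock);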

Updates to and lookups of wear-leveling entries in ubi->lookuptbl should
be protected by ubi->wl_lock. Fix it by taking ubi->wl_lock to serialize
access to a wl entry between wl_entry_destroy() and
eraseblk_count_seq_show().

A reproducer is available at [Link].

Link: https://bugzilla.kernel.org/show_bug.cgi?id=216305
Fixes: 7bccd12d27 ("ubi: Add debugfs file for tracking PEB state")
Fixes: 801c135ce7 ("UBI: Unsorted Block Images")
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Sasha Levin <sashal@kernel.org>

@@ -890,8 +890,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
         err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
         if (err) {
-                if (e2)
+                if (e2) {
+                        spin_lock(&ubi->wl_lock);
                         wl_entry_destroy(ubi, e2);
+                        spin_unlock(&ubi->wl_lock);
+                }
                 goto out_ro;
         }
@@ -1130,14 +1133,18 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
                 /* Re-schedule the LEB for erasure */
                 err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
                 if (err1) {
+                        spin_lock(&ubi->wl_lock);
                         wl_entry_destroy(ubi, e);
+                        spin_unlock(&ubi->wl_lock);
                         err = err1;
                         goto out_ro;
                 }
                 return err;
         }
 
+        spin_lock(&ubi->wl_lock);
         wl_entry_destroy(ubi, e);
+        spin_unlock(&ubi->wl_lock);
         if (err != -EIO)
                 /*
                  * If this is not %-EIO, we have no idea what to do. Scheduling