usb: mtu3: fix kernel panic at qmu transfer done irq handler
[ Upstream commit d28f4091ea ]

When handling the qmu transfer done irq, the handler unlocks @mtu->lock
before giving back the request. If another thread handles a disconnect
event at the same time and tries to disable the ep, it may take
@mtu->lock and free the qmu ring; the qmu irq handler may then get a
NULL gpd. Avoid the kernel panic by checking the gpd's value before
handling it.

e.g.
qmu done irq on cpu0          thread running on cpu1

qmu_done_tx()
  handle gpd [0]
    mtu3_requ_complete()      mtu3_gadget_ep_disable()
      unlock @mtu->lock
        give back request     lock @mtu->lock
                              mtu3_ep_disable()
                                mtu3_gpd_ring_free()
                              unlock @mtu->lock
      lock @mtu->lock
    get next gpd [1]

[1]: goto [0] to handle next gpd, and next gpd may be NULL.

Fixes: 48e0d3735a ("usb: mtu3: supports new QMU format")
Signed-off-by: Chunfeng Yun <chunfeng.yun@mediatek.com>
Link: https://lore.kernel.org/r/20230417025203.18097-3-chunfeng.yun@mediatek.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 17993a13b5
commit f262734286
@@ -210,6 +210,7 @@ static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
 	return ring->enqueue;
 }
 
+/* @dequeue may be NULL if ring is unallocated or freed */
 static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
 {
 	if (ring->dequeue < ring->end)
@@ -484,7 +485,7 @@ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
 	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
 		__func__, epnum, gpd, gpd_current, ring->enqueue);
 
-	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+	while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
 
 		mreq = next_request(mep);
 
@@ -523,7 +524,7 @@ static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
 	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);
 
-	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+	while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
 
 		mreq = next_request(mep);
 