bus: mhi: host: Add spinlock to protect WP access when queueing TREs

commit b89b6a863dd53bc70d8e52d50f9cfaef8ef5e9c9 upstream.

Protect WP accesses such that multiple threads queueing buffers for
incoming data do not race.

Meanwhile, if CONFIG_TRACE_IRQFLAGS is enabled, IRQs are re-enabled once
__local_bh_enable_ip() is called as part of write_unlock_bh(). Hence, take
the irqsave lock only after the TRE is generated so that write_unlock_bh()
never runs while the irqsave lock is held.
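
For illustration only (not part of the patch text), a minimal sketch of the
lock ordering this establishes, using the lock and field names that appear in
the diff below; error handling and the actual TRE construction are omitted:

	/* WP read/increment is serialized under the channel lock first */
	write_lock_bh(&mhi_chan->lock);
	/* ... build the TRE and advance the ring write pointer ... */
	write_unlock_bh(&mhi_chan->lock);	/* may re-enable IRQs via __local_bh_enable_ip() */

	/* Only then is the irqsave lock taken, so write_unlock_bh() never
	 * runs while pm_lock is held with interrupts disabled.
	 */
	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
	/* ... take runtime PM reference, ring the channel doorbell ... */
	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);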

Cc: stable@vger.kernel.org
Fixes: 189ff97cca ("bus: mhi: core: Add support for data transfer")
Signed-off-by: Bhaumik Bhatt <bbhatt@codeaurora.org>
Signed-off-by: Qiang Yu <quic_qianyu@quicinc.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Tested-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Link: https://lore.kernel.org/r/1702276972-41296-2-git-send-email-quic_qianyu@quicinc.com
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

@@ -1124,17 +1124,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
 		return -EIO;
 
-	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
 	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
-	if (unlikely(ret)) {
-		ret = -EAGAIN;
-		goto exit_unlock;
-	}
+	if (unlikely(ret))
+		return -EAGAIN;
 
 	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
 	if (unlikely(ret))
-		goto exit_unlock;
+		return ret;
+
+	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
 
 	/* Packet is queued, take a usage ref to exit M3 if necessary
 	 * for host->device buffer, balanced put is done on buffer completion
@@ -1154,7 +1152,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (dir == DMA_FROM_DEVICE)
 		mhi_cntrl->runtime_put(mhi_cntrl);
 
-exit_unlock:
 	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
 
 	return ret;
@@ -1206,6 +1203,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 	int eot, eob, chain, bei;
 	int ret;
 
+	/* Protect accesses for reading and incrementing WP */
+	write_lock_bh(&mhi_chan->lock);
+
 	buf_ring = &mhi_chan->buf_ring;
 	tre_ring = &mhi_chan->tre_ring;
 
@@ -1223,9 +1223,11 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 
 	if (!info->pre_mapped) {
 		ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
-		if (ret)
+		if (ret) {
+			write_unlock_bh(&mhi_chan->lock);
 			return ret;
+		}
 	}
 
 	eob = !!(flags & MHI_EOB);
 	eot = !!(flags & MHI_EOT);
@@ -1241,6 +1243,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 	/* increment WP */
 	mhi_add_ring_element(mhi_cntrl, tre_ring);
 	mhi_add_ring_element(mhi_cntrl, buf_ring);
 
+	write_unlock_bh(&mhi_chan->lock);
+
 	return 0;
 }