i2c: tegra: Share same DMA channel for RX and TX

Allocate only one DMA channel for the I2C controller and share it between
TX and RX instead of requesting two different DMA hardware channels with
the same slave ID. Since I2C transfers are half duplex, sharing a single
channel has no impact on performance.

Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Wolfram Sang <wsa@kernel.org>
Authored by Akhil R on 2023-04-27 18:09:15 +05:30; committed by Wolfram Sang
parent bcfc2ab7f4
commit fcc8a89a1c
1 changed file with 20 additions and 49 deletions
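
For quick reference, the diff below boils down to the pattern sketched here: request one dmaengine channel and reconfigure it per transfer with dmaengine_slave_config(), which is safe because a half-duplex controller never runs TX and RX at the same time. This is only a minimal, self-contained sketch of that pattern, not the Tegra driver itself; the my_dev structure, the my_dma_init()/my_dma_submit() helpers and the FIFO addresses are hypothetical names invented for illustration.

/*
 * Minimal sketch of the shared-channel pattern. Everything prefixed
 * with my_ is hypothetical and not taken from the Tegra driver.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

struct my_dev {
        struct device *dev;
        struct dma_chan *dma_chan;      /* one channel, reused for TX and RX */
        phys_addr_t tx_fifo_phys;       /* bus address of the device TX FIFO */
        phys_addr_t rx_fifo_phys;       /* bus address of the device RX FIFO */
};

/* Request a single channel up front; half-duplex hardware never needs two. */
static int my_dma_init(struct my_dev *md)
{
        md->dma_chan = dma_request_chan(md->dev, "tx");
        if (IS_ERR(md->dma_chan)) {
                int err = PTR_ERR(md->dma_chan);

                md->dma_chan = NULL;
                return err;
        }

        return 0;
}

/* Point the same channel at the right FIFO for each individual transfer. */
static int my_dma_submit(struct my_dev *md, dma_addr_t buf, size_t len,
                         bool read)
{
        struct dma_slave_config cfg = { .device_fc = true };
        struct dma_async_tx_descriptor *desc;
        enum dma_transfer_direction dir;
        int err;

        if (read) {
                dir = DMA_DEV_TO_MEM;
                cfg.src_addr = md->rx_fifo_phys;
                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                cfg.src_maxburst = 8;
        } else {
                dir = DMA_MEM_TO_DEV;
                cfg.dst_addr = md->tx_fifo_phys;
                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                cfg.dst_maxburst = 8;
        }

        /* Reconfigure the shared channel for this transfer's direction. */
        err = dmaengine_slave_config(md->dma_chan, &cfg);
        if (err)
                return err;

        desc = dmaengine_prep_slave_single(md->dma_chan, buf, len, dir,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -EIO;

        dmaengine_submit(desc);
        dma_async_issue_pending(md->dma_chan);

        return 0;
}

The per-transfer dmaengine_slave_config() call is what replaces the second channel: because the hardware is half duplex, the reconfiguration can never race with an in-flight transfer in the opposite direction.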

drivers/i2c/busses/i2c-tegra.c

@@ -249,8 +249,7 @@ struct tegra_i2c_hw_feature {
  * @msg_read: indicates that the transfer is a read access
  * @timings: i2c timings information like bus frequency
  * @multimaster_mode: indicates that I2C controller is in multi-master mode
- * @tx_dma_chan: DMA transmit channel
- * @rx_dma_chan: DMA receive channel
+ * @dma_chan: DMA channel
  * @dma_phys: handle to DMA resources
  * @dma_buf: pointer to allocated DMA buffer
  * @dma_buf_size: DMA buffer size
@@ -283,8 +282,7 @@ struct tegra_i2c_dev {
         u8 *msg_buf;
 
         struct completion dma_complete;
-        struct dma_chan *tx_dma_chan;
-        struct dma_chan *rx_dma_chan;
+        struct dma_chan *dma_chan;
         unsigned int dma_buf_size;
         struct device *dma_dev;
         dma_addr_t dma_phys;
@@ -393,16 +391,14 @@ static int tegra_i2c_dma_submit(struct tegra_i2c_dev *i2c_dev, size_t len)
 {
         struct dma_async_tx_descriptor *dma_desc;
         enum dma_transfer_direction dir;
-        struct dma_chan *chan;
 
         dev_dbg(i2c_dev->dev, "starting DMA for length: %zu\n", len);
 
         reinit_completion(&i2c_dev->dma_complete);
 
         dir = i2c_dev->msg_read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
-        chan = i2c_dev->msg_read ? i2c_dev->rx_dma_chan : i2c_dev->tx_dma_chan;
 
-        dma_desc = dmaengine_prep_slave_single(chan, i2c_dev->dma_phys,
+        dma_desc = dmaengine_prep_slave_single(i2c_dev->dma_chan, i2c_dev->dma_phys,
                                                len, dir, DMA_PREP_INTERRUPT |
                                                DMA_CTRL_ACK);
         if (!dma_desc) {
@@ -415,7 +411,7 @@ static int tegra_i2c_dma_submit(struct tegra_i2c_dev *i2c_dev, size_t len)
         dma_desc->callback_param = i2c_dev;
         dmaengine_submit(dma_desc);
-        dma_async_issue_pending(chan);
+        dma_async_issue_pending(i2c_dev->dma_chan);
 
         return 0;
 }
@@ -428,20 +424,14 @@ static void tegra_i2c_release_dma(struct tegra_i2c_dev *i2c_dev)
                 i2c_dev->dma_buf = NULL;
         }
 
-        if (i2c_dev->tx_dma_chan) {
-                dma_release_channel(i2c_dev->tx_dma_chan);
-                i2c_dev->tx_dma_chan = NULL;
-        }
-
-        if (i2c_dev->rx_dma_chan) {
-                dma_release_channel(i2c_dev->rx_dma_chan);
-                i2c_dev->rx_dma_chan = NULL;
+        if (i2c_dev->dma_chan) {
+                dma_release_channel(i2c_dev->dma_chan);
+                i2c_dev->dma_chan = NULL;
         }
 }
 
 static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
 {
-        struct dma_chan *chan;
         dma_addr_t dma_phys;
         u32 *dma_buf;
         int err;
@@ -459,25 +449,18 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
                 return 0;
         }
 
-        chan = dma_request_chan(i2c_dev->dev, "rx");
-        if (IS_ERR(chan)) {
-                err = PTR_ERR(chan);
+        /*
+         * The same channel will be used for both RX and TX.
+         * Keeping the name as "tx" for backward compatibility
+         * with existing devicetrees.
+         */
+        i2c_dev->dma_chan = dma_request_chan(i2c_dev->dev, "tx");
+        if (IS_ERR(i2c_dev->dma_chan)) {
+                err = PTR_ERR(i2c_dev->dma_chan);
                 goto err_out;
         }
 
-        i2c_dev->rx_dma_chan = chan;
-
-        chan = dma_request_chan(i2c_dev->dev, "tx");
-        if (IS_ERR(chan)) {
-                err = PTR_ERR(chan);
-                goto err_out;
-        }
-
-        i2c_dev->tx_dma_chan = chan;
-
-        WARN_ON(i2c_dev->tx_dma_chan->device != i2c_dev->rx_dma_chan->device);
-        i2c_dev->dma_dev = chan->device->dev;
+        i2c_dev->dma_dev = i2c_dev->dma_chan->device->dev;
 
         i2c_dev->dma_buf_size = i2c_dev->hw->quirks->max_write_len +
                                 I2C_PACKET_HEADER_SIZE;
@@ -976,11 +959,7 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
                 dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
 
         if (i2c_dev->dma_mode) {
-                if (i2c_dev->msg_read)
-                        dmaengine_terminate_async(i2c_dev->rx_dma_chan);
-                else
-                        dmaengine_terminate_async(i2c_dev->tx_dma_chan);
-
+                dmaengine_terminate_async(i2c_dev->dma_chan);
                 complete(&i2c_dev->dma_complete);
         }
@@ -994,7 +973,6 @@ static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev,
 {
         struct dma_slave_config slv_config = {0};
         u32 val, reg, dma_burst, reg_offset;
-        struct dma_chan *chan;
         int err;
 
         if (i2c_dev->hw->has_mst_fifo)
@@ -1011,7 +989,6 @@ static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev,
                 dma_burst = 8;
 
         if (i2c_dev->msg_read) {
-                chan = i2c_dev->rx_dma_chan;
                 reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_RX_FIFO);
 
                 slv_config.src_addr = i2c_dev->base_phys + reg_offset;
@@ -1023,7 +1000,6 @@ static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev,
                 else
                         val = I2C_FIFO_CONTROL_RX_TRIG(dma_burst);
         } else {
-                chan = i2c_dev->tx_dma_chan;
                 reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_TX_FIFO);
 
                 slv_config.dst_addr = i2c_dev->base_phys + reg_offset;
@@ -1037,7 +1013,7 @@ static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev,
         }
 
         slv_config.device_fc = true;
-        err = dmaengine_slave_config(chan, &slv_config);
+        err = dmaengine_slave_config(i2c_dev->dma_chan, &slv_config);
         if (err) {
                 dev_err(i2c_dev->dev, "DMA config failed: %d\n", err);
                 dev_err(i2c_dev->dev, "falling back to PIO\n");
@@ -1347,13 +1323,8 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
                  * performs synchronization after the transfer's termination
                  * and we want to get a completion if transfer succeeded.
                  */
-                dmaengine_synchronize(i2c_dev->msg_read ?
-                                      i2c_dev->rx_dma_chan :
-                                      i2c_dev->tx_dma_chan);
-
-                dmaengine_terminate_sync(i2c_dev->msg_read ?
-                                         i2c_dev->rx_dma_chan :
-                                         i2c_dev->tx_dma_chan);
+                dmaengine_synchronize(i2c_dev->dma_chan);
+                dmaengine_terminate_sync(i2c_dev->dma_chan);
 
                 if (!time_left && !completion_done(&i2c_dev->dma_complete)) {
                         dev_err(i2c_dev->dev, "DMA transfer timed out\n");