bus: mhi: ep: Add support for sending events to the host
Add support for sending events to the host over the MHI bus from the endpoint. The following events are supported: 1. Transfer completion event 2. Command completion event 3. State change event 4. Execution Environment (EE) change event An event is sent whenever an operation has been completed in the MHI EP device. The event is sent using the MHI event ring, and additionally the host is notified using an IRQ if required. Reviewed-by: Alex Elder <elder@linaro.org> Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> Link: https://lore.kernel.org/r/20220405135754.6622-7-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
bbdcba57a1
commit
961aeb6892
|
@ -165,6 +165,22 @@
|
|||
#define MHI_TRE_GET_EV_LINKSPEED(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1)))
|
||||
#define MHI_TRE_GET_EV_LINKWIDTH(tre) FIELD_GET(GENMASK(7, 0), (MHI_TRE_GET_DWORD(tre, 0)))
|
||||
|
||||
/* State change event */
|
||||
#define MHI_SC_EV_PTR 0
|
||||
#define MHI_SC_EV_DWORD0(state) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), state))
|
||||
#define MHI_SC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
|
||||
|
||||
/* EE event */
|
||||
#define MHI_EE_EV_PTR 0
|
||||
#define MHI_EE_EV_DWORD0(ee) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), ee))
|
||||
#define MHI_EE_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
|
||||
|
||||
|
||||
/* Command Completion event */
|
||||
#define MHI_CC_EV_PTR(ptr) cpu_to_le64(ptr)
|
||||
#define MHI_CC_EV_DWORD0(code) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code))
|
||||
#define MHI_CC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
|
||||
|
||||
/* Transfer descriptor macros */
|
||||
#define MHI_TRE_DATA_PTR(ptr) cpu_to_le64(ptr)
|
||||
#define MHI_TRE_DATA_DWORD0(len) cpu_to_le32(FIELD_PREP(GENMASK(15, 0), len))
|
||||
|
@ -175,6 +191,12 @@
|
|||
FIELD_PREP(BIT(9), ieot) | \
|
||||
FIELD_PREP(BIT(8), ieob) | \
|
||||
FIELD_PREP(BIT(0), chain))
|
||||
#define MHI_TRE_DATA_GET_PTR(tre) le64_to_cpu((tre)->ptr)
|
||||
#define MHI_TRE_DATA_GET_LEN(tre) FIELD_GET(GENMASK(15, 0), MHI_TRE_GET_DWORD(tre, 0))
|
||||
#define MHI_TRE_DATA_GET_CHAIN(tre) (!!(FIELD_GET(BIT(0), MHI_TRE_GET_DWORD(tre, 1))))
|
||||
#define MHI_TRE_DATA_GET_IEOB(tre) (!!(FIELD_GET(BIT(8), MHI_TRE_GET_DWORD(tre, 1))))
|
||||
#define MHI_TRE_DATA_GET_IEOT(tre) (!!(FIELD_GET(BIT(9), MHI_TRE_GET_DWORD(tre, 1))))
|
||||
#define MHI_TRE_DATA_GET_BEI(tre) (!!(FIELD_GET(BIT(10), MHI_TRE_GET_DWORD(tre, 1))))
|
||||
|
||||
/* RSC transfer descriptor macros */
|
||||
/*
 * NOTE(review): GENMASK(64, 48) references bit 64, which does not exist in a
 * 64-bit word, and __GENMASK's right-shift would be out of range once this
 * macro is expanded. The length field presumably occupies bits 63:48 of the
 * pointer word — confirm against the MHI spec and consider GENMASK_ULL(63, 48).
 */
#define MHI_RSCTRE_DATA_PTR(ptr, len) cpu_to_le64(FIELD_PREP(GENMASK(64, 48), len) | ptr)
|
||||
|
|
|
@ -197,4 +197,8 @@ void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *s
|
|||
void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl);
|
||||
void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl);
|
||||
|
||||
/* MHI EP core functions */
|
||||
int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state);
|
||||
int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -18,6 +18,93 @@
|
|||
|
||||
static DEFINE_IDA(mhi_ep_cntrl_ida);
|
||||
|
||||
/*
 * mhi_ep_send_event - Push one element onto an event ring and notify the host
 * @mhi_cntrl: MHI endpoint controller
 * @ring_idx: Index of the event ring to use
 * @el: Fully formed event ring element to add
 * @bei: Block Event Interrupt flag taken from the TRE
 *
 * Starts the event ring on first use (from the cached host event context),
 * then adds @el to it. The ring is protected by @mhi_cntrl->event_lock.
 * The host is signalled with an IRQ afterwards unless @bei is set, in which
 * case the host asked for interrupt moderation and polls the ring itself.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
			     struct mhi_ring_element *el, bool bei)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	union mhi_ep_ring_ctx *ctx;
	struct mhi_ep_ring *ring;
	int ret = 0;

	mutex_lock(&mhi_cntrl->event_lock);

	ring = &mhi_cntrl->mhi_event[ring_idx].ring;
	ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];

	/* Lazily start the ring the first time an event is sent on it */
	if (!ring->started) {
		ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
		if (ret) {
			dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
			goto out_unlock;
		}
	}

	/* Add element to the event ring */
	ret = mhi_ep_ring_add_element(ring, el);
	if (ret)
		dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);

out_unlock:
	mutex_unlock(&mhi_cntrl->event_lock);

	/*
	 * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
	 * set this flag for interrupt moderation as per MHI protocol.
	 */
	if (!ret && !bei)
		mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);

	return ret;
}
|
||||
|
||||
/*
 * mhi_ep_send_completion_event - Send a transfer completion event to the host
 * @mhi_cntrl: MHI endpoint controller
 * @ring: Channel (transfer) ring the completed TRE belongs to
 * @tre: The TRE that has been processed
 * @len: Number of bytes transferred
 * @code: Completion code to report
 *
 * The event's pointer field carries the host address of the completed TRE,
 * derived from the ring base and current read offset. BEI is propagated from
 * the TRE so the host's interrupt-moderation request is honoured.
 */
static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
					struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
{
	struct mhi_ring_element event = {};
	u64 tre_addr;

	/* Host address of the TRE being completed */
	tre_addr = ring->rbase + ring->rd_offset * sizeof(*tre);

	event.ptr = cpu_to_le64(tre_addr);
	event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
	event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);

	return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
}
|
||||
|
||||
int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
|
||||
{
|
||||
struct mhi_ring_element event = {};
|
||||
|
||||
event.dword[0] = MHI_SC_EV_DWORD0(state);
|
||||
event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
|
||||
|
||||
return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
|
||||
}
|
||||
|
||||
int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
|
||||
{
|
||||
struct mhi_ring_element event = {};
|
||||
|
||||
event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
|
||||
event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
|
||||
|
||||
return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
|
||||
}
|
||||
|
||||
static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
|
||||
{
|
||||
struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
|
||||
struct mhi_ring_element event = {};
|
||||
|
||||
event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
|
||||
event.dword[0] = MHI_CC_EV_DWORD0(code);
|
||||
event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
|
||||
|
||||
return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
|
||||
}
|
||||
|
||||
static void mhi_ep_release_device(struct device *dev)
|
||||
{
|
||||
struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
|
||||
|
@ -227,6 +314,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
|
|||
goto err_free_ch;
|
||||
}
|
||||
|
||||
mutex_init(&mhi_cntrl->event_lock);
|
||||
|
||||
/* Set MHI version and AMSS EE before enumeration */
|
||||
mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
|
||||
mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
|
||||
|
|
|
@ -59,10 +59,14 @@ struct mhi_ep_db_info {
|
|||
* @mhi_event: Points to the event ring configurations table
|
||||
* @mhi_cmd: Points to the command ring configurations table
|
||||
* @sm: MHI Endpoint state machine
|
||||
* @ch_ctx_cache: Cache of host channel context data structure
|
||||
* @ev_ctx_cache: Cache of host event context data structure
|
||||
* @cmd_ctx_cache: Cache of host command context data structure
|
||||
* @ch_ctx_host_pa: Physical address of host channel context data structure
|
||||
* @ev_ctx_host_pa: Physical address of host event context data structure
|
||||
* @cmd_ctx_host_pa: Physical address of host command context data structure
|
||||
* @chdb: Array of channel doorbell interrupt info
|
||||
* @event_lock: Lock for protecting event rings
|
||||
* @raise_irq: CB function for raising IRQ to the host
|
||||
* @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
|
||||
* @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
|
||||
|
@ -87,11 +91,15 @@ struct mhi_ep_cntrl {
|
|||
struct mhi_ep_cmd *mhi_cmd;
|
||||
struct mhi_ep_sm *sm;
|
||||
|
||||
struct mhi_chan_ctxt *ch_ctx_cache;
|
||||
struct mhi_event_ctxt *ev_ctx_cache;
|
||||
struct mhi_cmd_ctxt *cmd_ctx_cache;
|
||||
u64 ch_ctx_host_pa;
|
||||
u64 ev_ctx_host_pa;
|
||||
u64 cmd_ctx_host_pa;
|
||||
|
||||
struct mhi_ep_db_info chdb[4];
|
||||
struct mutex event_lock;
|
||||
|
||||
void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
|
||||
int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
|
||||
|
|
Loading…
Reference in New Issue