Merge tag 'mhi-for-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi into char-misc-next

Manivannan writes:

MHI changes for v5.12

Loic improved the MHI PCI generic controller by adding support for the DIAG
channel, PCI error handling, suspend/recovery/resume, and a health check.
Loic also added support for resetting the MHI device as per the MHI
specification. This involves writing to a dedicated register in the default
case and invoking a controller-specific callback when one is provided.
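
As a rough sketch (not part of this series), a controller driver that needs a
non-standard reset sequence could override the default SOC_RESET register
write by setting the reset callback before registering the controller; the
function names below are hypothetical:

#include <linux/mhi.h>

/* Hypothetical device-specific reset, used by mhi_soc_reset() instead of
 * the default MHI_SOC_RESET_REQ register write.
 */
static void my_mhi_reset(struct mhi_controller *mhi_cntrl)
{
	/* device-specific reset sequence would go here */
}

static void my_mhi_setup(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->reset = my_mhi_reset;	/* optional override */
}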

Along with this, Loic also added a new API that returns the number of free
TREs (Transfer Ring Elements) from the MHI core. Client drivers can make use
of this API; the current consumer is the "mhi-net" driver. To carry both the
"mhi-net" driver change and the API change, we created the
"mhi-net-immutable" branch for this patch and merged it into net-next and
mhi-next.
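
For illustration only, a client driver (such as mhi-net) could use the new
API for TX flow control along these lines; the wrapper name is hypothetical:

#include <linux/dma-direction.h>
#include <linux/mhi.h>

/* Returns true if the uplink (host-to-device) transfer ring still has room,
 * i.e. the MHI core reports at least one free TRE.
 */
static bool my_client_can_queue_tx(struct mhi_device *mhi_dev)
{
	return mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE) > 0;
}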

Carl added a patch that lets the controller driver pass custom IRQ flags for
the BHI and MHI event interrupts to the MHI core. The current consumer of
this feature is the ath11k MHI controller driver. To carry both changes, we
created the "mhi-ath11k-immutable" branch for this patch and merged it into
ath11k-next and mhi-next.
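
A hedged sketch of how a controller driver might use this (the registration
helper below is hypothetical): the flags are set on the mhi_controller before
registration and then replace the built-in IRQF_SHARED | IRQF_NO_SUSPEND
default in mhi_init_irq_setup():

#include <linux/interrupt.h>
#include <linux/mhi.h>

static int my_controller_register(struct mhi_controller *mhi_cntrl,
				  const struct mhi_controller_config *cfg)
{
	/* Custom flags for the BHI and event ring interrupts; if left at
	 * zero, the MHI core keeps IRQF_SHARED | IRQF_NO_SUSPEND.
	 */
	mhi_cntrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;

	return mhi_register_controller(mhi_cntrl, cfg);
}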

Finally, Loic cleaned up the MHI queue APIs and fixed the shared MSI vector
support.
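
The cleanup routes mhi_queue_buf(), mhi_queue_skb() and mhi_queue_dma()
through a single mhi_queue() helper (see the main.c diff below); the calling
convention for clients is unchanged. A minimal, hypothetical TX helper would
still look like this:

#include <linux/dma-direction.h>
#include <linux/mhi.h>

static int my_client_send(struct mhi_device *mhi_dev, void *buf, size_t len)
{
	/* Queue one uplink buffer and mark it as end of transfer */
	return mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
}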

* tag 'mhi-for-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi:
  bus: mhi: pci_generic: Increase num of elements in hw event ring
  mhi: pci_generic: Print warning in case of firmware crash
  bus: mhi: core: Add helper API to return number of free TREs
  mhi: core: Factorize mhi queuing
  mhi: use irq_flags if controller driver configures it
  mhi: pci_generic: Fix shared MSI vector support
  mhi: unconstify mhi_event_config
  bus: mhi: Ensure correct ring update ordering with memory barrier
  mhi: pci_generic: Set irq moderation value to 1ms for hw channels
  mhi: pci_generic: Add diag channels
  mhi: pci_generic: Increase controller timeout value
  mhi: pci_generic: Add health-check
  mhi: pci_generic: Add PCI error handlers
  mhi: pci_generic: Add suspend/resume/recovery procedure
  mhi: pci_generic: Add support for reset
  mhi: pci_generic: Enable burst mode for hardware channels
  mhi: pci-generic: Increase number of hardware events
  bus: mhi: core: Add device hardware reset support
Greg Kroah-Hartman committed Feb 5, 2021
2 parents 1609faa + 026c5b1 commit 37f1cda
Showing 4 changed files with 461 additions and 145 deletions.
9 changes: 7 additions & 2 deletions drivers/bus/mhi/core/init.c
@@ -151,12 +151,17 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
int i, ret;

/* if controller driver has set irq_flags, use it */
if (mhi_cntrl->irq_flags)
irq_flags = mhi_cntrl->irq_flags;

/* Setup BHI_INTVEC IRQ */
ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
mhi_intvec_threaded_handler,
IRQF_SHARED | IRQF_NO_SUSPEND,
irq_flags,
"bhi", mhi_cntrl);
if (ret)
return ret;
@@ -174,7 +179,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)

ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
mhi_irq_handler,
IRQF_SHARED | IRQF_NO_SUSPEND,
irq_flags,
"mhi", mhi_event);
if (ret) {
dev_err(dev, "Error requesting irq:%d for ev:%d\n",
194 changes: 76 additions & 118 deletions drivers/bus/mhi/core/main.c
@@ -111,7 +111,14 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
dma_addr_t db;

db = ring->iommu_base + (ring->wp - ring->base);

/*
* Writes to the new ring element must be visible to the hardware
* before letting h/w know there is new element to fetch.
*/
dma_wmb();
*ring->ctxt_wp = db;

mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
ring->db_addr, db);
}
@@ -135,6 +142,19 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_get_mhi_state);

void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
{
if (mhi_cntrl->reset) {
mhi_cntrl->reset(mhi_cntrl);
return;
}

/* Generic MHI SoC reset */
mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
MHI_SOC_RESET_REQ);
}
EXPORT_SYMBOL_GPL(mhi_soc_reset);

int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info)
{
@@ -260,6 +280,18 @@ int mhi_destroy_device(struct device *dev, void *data)
return 0;
}

int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
enum dma_data_direction dir)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
mhi_dev->ul_chan : mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
}
EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);

void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
struct mhi_driver *mhi_drv;
@@ -947,118 +979,88 @@ static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
return (tmp == ring->rp);
}

int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct sk_buff *skb, size_t len, enum mhi_flags mflags)
static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
enum dma_data_direction dir, enum mhi_flags mflags)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
struct mhi_buf_info buf_info = { };
unsigned long flags;
int ret;

/* If MHI host pre-allocates buffers then client drivers cannot queue */
if (mhi_chan->pre_alloc)
return -EINVAL;
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
return -EIO;

if (mhi_is_ring_full(mhi_cntrl, tre_ring))
return -ENOMEM;
read_lock_irqsave(&mhi_cntrl->pm_lock, flags);

read_lock_bh(&mhi_cntrl->pm_lock);
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
read_unlock_bh(&mhi_cntrl->pm_lock);
return -EIO;
ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
if (unlikely(ret)) {
ret = -ENOMEM;
goto exit_unlock;
}

/* we're in M3 or transitioning to M3 */
ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
if (unlikely(ret))
goto exit_unlock;

/* trigger M3 exit if necessary */
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
mhi_trigger_resume(mhi_cntrl);

/* Toggle wake to exit out of M2 */
/* Assert dev_wake (to exit/prevent M1/M2)*/
mhi_cntrl->wake_toggle(mhi_cntrl);

buf_info.v_addr = skb->data;
buf_info.cb_buf = skb;
buf_info.len = len;

ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
if (unlikely(ret)) {
read_unlock_bh(&mhi_cntrl->pm_lock);
return ret;
}

if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);

if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
read_lock_bh(&mhi_chan->lock);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_bh(&mhi_chan->lock);
if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
ret = -EIO;
goto exit_unlock;
}

read_unlock_bh(&mhi_cntrl->pm_lock);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);

return 0;
exit_unlock:
read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

return ret;
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);

int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
struct mhi_buf_info buf_info = { };
int ret;

/* If MHI host pre-allocates buffers then client drivers cannot queue */
if (mhi_chan->pre_alloc)
return -EINVAL;

if (mhi_is_ring_full(mhi_cntrl, tre_ring))
return -ENOMEM;

read_lock_bh(&mhi_cntrl->pm_lock);
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
dev_err(dev, "MHI is not in activate state, PM state: %s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state));
read_unlock_bh(&mhi_cntrl->pm_lock);
buf_info.v_addr = skb->data;
buf_info.cb_buf = skb;
buf_info.len = len;

return -EIO;
}
if (unlikely(mhi_chan->pre_alloc))
return -EINVAL;

/* we're in M3 or transitioning to M3 */
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
mhi_trigger_resume(mhi_cntrl);
return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);

/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
{
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_buf_info buf_info = { };

buf_info.p_addr = mhi_buf->dma_addr;
buf_info.cb_buf = mhi_buf;
buf_info.pre_mapped = true;
buf_info.len = len;

ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
if (unlikely(ret)) {
read_unlock_bh(&mhi_cntrl->pm_lock);
return ret;
}

if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);

if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
read_lock_bh(&mhi_chan->lock);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_bh(&mhi_chan->lock);
}

read_unlock_bh(&mhi_cntrl->pm_lock);
if (unlikely(mhi_chan->pre_alloc))
return -EINVAL;

return 0;
return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_dma);

@@ -1112,57 +1114,13 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
void *buf, size_t len, enum mhi_flags mflags)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring;
struct mhi_buf_info buf_info = { };
unsigned long flags;
int ret;

/*
* this check here only as a guard, it's always
* possible mhi can enter error while executing rest of function,
* which is not fatal so we do not need to hold pm_lock
*/
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
return -EIO;

tre_ring = &mhi_chan->tre_ring;
if (mhi_is_ring_full(mhi_cntrl, tre_ring))
return -ENOMEM;

buf_info.v_addr = buf;
buf_info.cb_buf = buf;
buf_info.len = len;

ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
if (unlikely(ret))
return ret;

read_lock_irqsave(&mhi_cntrl->pm_lock, flags);

/* we're in M3 or transitioning to M3 */
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
mhi_trigger_resume(mhi_cntrl);

/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);

if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);

if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
unsigned long flags;

read_lock_irqsave(&mhi_chan->lock, flags);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_irqrestore(&mhi_chan->lock, flags);
}

read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

return 0;
return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);

