thunderbolt: Use spinlock in NHI serialization
This is needed because ring polling functionality can be called from
atomic contexts when networking and other high-speed traffic is
transferred over a Thunderbolt cable.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Reviewed-by: Michael Jamet <michael.jamet@intel.com>
Reviewed-by: Yehezkel Bernat <yehezkel.bernat@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
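
For context on the message above: "atomic context" (softirq, NAPI-style
polling, or any section running with a spinlock held or interrupts disabled)
must not sleep, and mutex_lock() may sleep while waiting for the owner. A
spinlock taken with interrupts disabled is safe in those paths. A minimal
sketch of the constraint, using hypothetical names (example_nhi,
example_poll) that are not part of this patch:

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_nhi {
        spinlock_t lock;        /* serializes ring bookkeeping */
        bool going_away;
};

/* May run in softirq/NAPI context, where sleeping is forbidden. */
static void example_poll(struct example_nhi *nhi)
{
        unsigned long flags;

        /*
         * mutex_lock(&nhi->lock) could schedule() here, which is a bug
         * in atomic context; spin_lock_irqsave() never sleeps.
         */
        spin_lock_irqsave(&nhi->lock, flags);
        if (!nhi->going_away)
                ; /* ... process completed ring descriptors ... */
        spin_unlock_irqrestore(&nhi->lock, flags);
}
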
Mika Westerberg authored and David S. Miller committed Oct 2, 2017
1 parent 22b7de1 commit 59120e0
Showing 2 changed files with 42 additions and 35 deletions.
drivers/thunderbolt/nhi.c (75 changes: 41 additions, 34 deletions)
@@ -327,21 +327,9 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
         if (transmit && hop == RING_E2E_UNUSED_HOPID)
                 return NULL;
 
-        mutex_lock(&nhi->lock);
-        if (hop >= nhi->hop_count) {
-                dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
-                goto err;
-        }
-        if (transmit && nhi->tx_rings[hop]) {
-                dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
-                goto err;
-        } else if (!transmit && nhi->rx_rings[hop]) {
-                dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
-                goto err;
-        }
         ring = kzalloc(sizeof(*ring), GFP_KERNEL);
         if (!ring)
-                goto err;
+                return NULL;
 
         spin_lock_init(&ring->lock);
         INIT_LIST_HEAD(&ring->queue);
@@ -359,25 +347,45 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
         ring->tail = 0;
         ring->running = false;
 
-        if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
-                goto err;
-
         ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
                         size * sizeof(*ring->descriptors),
                         &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
         if (!ring->descriptors)
-                goto err;
+                goto err_free_ring;
 
+        if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
+                goto err_free_descs;
+
+        spin_lock_irq(&nhi->lock);
+        if (hop >= nhi->hop_count) {
+                dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
+                goto err_release_msix;
+        }
+        if (transmit && nhi->tx_rings[hop]) {
+                dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
+                goto err_release_msix;
+        } else if (!transmit && nhi->rx_rings[hop]) {
+                dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
+                goto err_release_msix;
+        }
         if (transmit)
                 nhi->tx_rings[hop] = ring;
         else
                 nhi->rx_rings[hop] = ring;
-        mutex_unlock(&nhi->lock);
+        spin_unlock_irq(&nhi->lock);
+
         return ring;
 
-err:
+err_release_msix:
+        spin_unlock_irq(&nhi->lock);
+        ring_release_msix(ring);
+err_free_descs:
+        dma_free_coherent(&ring->nhi->pdev->dev,
+                          ring->size * sizeof(*ring->descriptors),
+                          ring->descriptors, ring->descriptors_dma);
+err_free_ring:
         kfree(ring);
-        mutex_unlock(&nhi->lock);
 
         return NULL;
 }

@@ -421,8 +429,8 @@ void tb_ring_start(struct tb_ring *ring)
         u16 frame_size;
         u32 flags;
 
-        mutex_lock(&ring->nhi->lock);
-        spin_lock_irq(&ring->lock);
+        spin_lock_irq(&ring->nhi->lock);
+        spin_lock(&ring->lock);
         if (ring->nhi->going_away)
                 goto err;
         if (ring->running) {
@@ -469,8 +477,8 @@ void tb_ring_start(struct tb_ring *ring)
         ring_interrupt_active(ring, true);
         ring->running = true;
 err:
-        spin_unlock_irq(&ring->lock);
-        mutex_unlock(&ring->nhi->lock);
+        spin_unlock(&ring->lock);
+        spin_unlock_irq(&ring->nhi->lock);
 }
 EXPORT_SYMBOL_GPL(tb_ring_start);
 
@@ -489,8 +497,8 @@ EXPORT_SYMBOL_GPL(tb_ring_start);
  */
 void tb_ring_stop(struct tb_ring *ring)
 {
-        mutex_lock(&ring->nhi->lock);
-        spin_lock_irq(&ring->lock);
+        spin_lock_irq(&ring->nhi->lock);
+        spin_lock(&ring->lock);
         dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
                  RING_TYPE(ring), ring->hop);
         if (ring->nhi->going_away)
@@ -511,8 +519,8 @@ void tb_ring_stop(struct tb_ring *ring)
         ring->running = false;
 
 err:
-        spin_unlock_irq(&ring->lock);
-        mutex_unlock(&ring->nhi->lock);
+        spin_unlock(&ring->lock);
+        spin_unlock_irq(&ring->nhi->lock);
 
         /*
          * schedule ring->work to invoke callbacks on all remaining frames.
@@ -534,7 +542,7 @@ EXPORT_SYMBOL_GPL(tb_ring_stop);
  */
 void tb_ring_free(struct tb_ring *ring)
 {
-        mutex_lock(&ring->nhi->lock);
+        spin_lock_irq(&ring->nhi->lock);
         /*
          * Dissociate the ring from the NHI. This also ensures that
          * nhi_interrupt_work cannot reschedule ring->work.
@@ -564,7 +572,7 @@ void tb_ring_free(struct tb_ring *ring)
                  RING_TYPE(ring),
                  ring->hop);
 
-        mutex_unlock(&ring->nhi->lock);
+        spin_unlock_irq(&ring->nhi->lock);
         /**
          * ring->work can no longer be scheduled (it is scheduled only
          * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
@@ -639,7 +647,7 @@ static void nhi_interrupt_work(struct work_struct *work)
         int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
         struct tb_ring *ring;
 
-        mutex_lock(&nhi->lock);
+        spin_lock_irq(&nhi->lock);
 
         /*
          * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
@@ -677,7 +685,7 @@ static void nhi_interrupt_work(struct work_struct *work)
                 /* we do not check ring->running, this is done in ring->work */
                 schedule_work(&ring->work);
         }
-        mutex_unlock(&nhi->lock);
+        spin_unlock_irq(&nhi->lock);
 }
 
 static irqreturn_t nhi_msi(int irq, void *data)
@@ -767,7 +775,6 @@ static void nhi_shutdown(struct tb_nhi *nhi)
                 devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
                 flush_work(&nhi->interrupt_work);
         }
-        mutex_destroy(&nhi->lock);
         ida_destroy(&nhi->msix_ida);
 }
 
@@ -856,7 +863,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                 return res;
         }
 
-        mutex_init(&nhi->lock);
+        spin_lock_init(&nhi->lock);
 
         pci_set_master(pdev);
 
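Two details of the nhi.c diff above are worth noting. First, tb_ring_alloc()
now calls kzalloc(), dma_alloc_coherent() (both GFP_KERNEL, which may sleep)
and ring_request_msix() before taking nhi->lock, since sleeping under a
spinlock is not allowed; the hop validation and the publishing of the ring
into tx_rings[]/rx_rings[] move after those steps, with unwind labels in
reverse order. Second, tb_ring_start() and tb_ring_stop() nest the two locks:
the outer nhi->lock is taken with spin_lock_irq(), which disables local
interrupts, so the inner ring->lock needs only a plain spin_lock(). A hedged
sketch of that nesting, using hypothetical demo_* names rather than the
driver's real types:

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_nhi {
        spinlock_t lock;                /* outer lock */
};

struct demo_ring {
        spinlock_t lock;                /* inner lock */
        struct demo_nhi *nhi;
        bool running;
};

static void demo_ring_start(struct demo_ring *ring)
{
        spin_lock_irq(&ring->nhi->lock);        /* outer: IRQs now off */
        spin_lock(&ring->lock);                 /* inner: plain spin_lock() */

        ring->running = true;                   /* ... program hardware ... */

        spin_unlock(&ring->lock);               /* release in reverse order */
        spin_unlock_irq(&ring->nhi->lock);      /* IRQs back on */
}

Using spin_lock_irq() on the inner lock as well would re-enable interrupts at
the inner unlock while the outer lock was still held, which is why only the
outermost acquisition manages the IRQ state.
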
include/linux/thunderbolt.h (2 changes: 1 addition, 1 deletion)
@@ -415,7 +415,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
  * @hop_count: Number of rings (end point hops) supported by NHI.
  */
 struct tb_nhi {
-        struct mutex lock;
+        spinlock_t lock;
         struct pci_dev *pdev;
         void __iomem *iobase;
         struct tb_ring **tx_rings;
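The one-line change above drives the rest of the patch: a spinlock_t taken
with spin_lock_irq() forbids its holders from sleeping, is initialized with
spin_lock_init() (as the nhi_probe() hunk does in place of mutex_init()), and
has no destructor, which is why nhi_shutdown() simply drops the
mutex_destroy() call. A short hypothetical sketch of that lifecycle:

#include <linux/spinlock.h>

struct sketch_nhi {
        spinlock_t lock;
};

static void sketch_probe(struct sketch_nhi *nhi)
{
        spin_lock_init(&nhi->lock);     /* replaces mutex_init() */
}

static void sketch_shutdown(struct sketch_nhi *nhi)
{
        /* Nothing to do for the lock itself: there is no
         * spin_lock_destroy(), so mutex_destroy() has no replacement. */
}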
