Skip to content

Commit

Permalink
ixgbe: commonize ixgbe_map_rings_to_vectors to work for all interrupt types
Browse files Browse the repository at this point in the history

This patch makes it so that the map_rings_to_vectors call will work with
all interrupt types.  The advantage of this is that there will now be a
predictable mapping for all given interrupt types.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
  • Loading branch information
Alexander Duyck authored and Jeff Kirsher committed Sep 16, 2011
1 parent 263a84e commit 4cc6df2
Showing 1 changed file with 34 additions and 54 deletions.
88 changes: 34 additions & 54 deletions drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -2014,59 +2014,41 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
* group the rings as "efficiently" as possible. You would add new
* mapping configurations in here.
**/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
static void ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
{
int q_vectors;
int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
int rxr_remaining = adapter->num_rx_queues, rxr_idx = 0;
int txr_remaining = adapter->num_tx_queues, txr_idx = 0;
int v_start = 0;
int rxr_idx = 0, txr_idx = 0;
int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues;
int i, j;
int rqpv, tqpv;
int err = 0;

/* No mapping required if MSI-X is disabled. */
/* only one q_vector if MSI-X is disabled. */
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
goto out;

q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
q_vectors = 1;

/*
* The ideal configuration...
* We have enough vectors to map one per queue.
* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
* group them so there are multiple queues per vector.
*
* Re-adjusting *qpv takes care of the remainder.
*/
if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
for (; v_start < q_vectors && rxr_remaining; v_start++) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start);
for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--)
map_vector_to_rxq(adapter, v_start, rxr_idx);

for (; txr_idx < txr_remaining; v_start++, txr_idx++)
map_vector_to_txq(adapter, v_start, txr_idx);

goto out;
}

/*
* If we don't have enough vectors for a 1-to-1
* mapping, we'll have to group them so there are
* multiple queues per vector.
* If there are not enough q_vectors for each ring to have its own
* vector then we must pair up Rx/Tx on each vector
*/
/* Re-adjusting *qpv takes care of the remainder. */
for (i = v_start; i < q_vectors; i++) {
rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
for (j = 0; j < rqpv; j++) {
map_vector_to_rxq(adapter, i, rxr_idx);
rxr_idx++;
rxr_remaining--;
}
tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
for (j = 0; j < tqpv; j++) {
map_vector_to_txq(adapter, i, txr_idx);
txr_idx++;
txr_remaining--;
}
if ((v_start + txr_remaining) > q_vectors)
v_start = 0;

for (; v_start < q_vectors && txr_remaining; v_start++) {
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start);
for (; tqpv; tqpv--, txr_idx++, txr_remaining--)
map_vector_to_txq(adapter, v_start, txr_idx);
}
out:
return err;
}

/**
Expand All @@ -2083,10 +2065,6 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
int vector, err;
int ri = 0, ti = 0;

err = ixgbe_map_rings_to_vectors(adapter);
if (err)
return err;

for (vector = 0; vector < q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
Expand Down Expand Up @@ -2294,19 +2272,25 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int err;

if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
/* map all of the rings to the q_vectors */
ixgbe_map_rings_to_vectors(adapter);

if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
err = ixgbe_request_msix_irqs(adapter);
} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
netdev->name, adapter);
} else {
else
err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
netdev->name, adapter);
}

if (err)
if (err) {
e_err(probe, "request_irq failed, Error %d\n", err);

/* place q_vectors and rings back into a known good state */
ixgbe_reset_q_vectors(adapter);
}

return err;
}

Expand All @@ -2316,11 +2300,10 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
int i, q_vectors;

q_vectors = adapter->num_msix_vectors;

i = q_vectors - 1;
free_irq(adapter->msix_entries[i].vector, adapter);

i--;

for (; i >= 0; i--) {
/* free only the irqs that were actually requested */
if (!adapter->q_vector[i]->rx.ring &&
Expand Down Expand Up @@ -2387,9 +2370,6 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
ixgbe_set_ivar(adapter, 0, 0, 0);
ixgbe_set_ivar(adapter, 1, 0, 0);

map_vector_to_rxq(adapter, 0, 0);
map_vector_to_txq(adapter, 0, 0);

e_info(hw, "Legacy interrupt IVAR setup done\n");
}

Expand Down

0 comments on commit 4cc6df2

Please sign in to comment.