ice: add individual interrupt allocation
Currently, interrupt allocations are distributed in batches, depending
on the feature. Also, after allocation there is a series of operations
that distributes per-IRQ settings through that batch of interrupts.

Although the driver does not yet support dynamic interrupt allocation,
keep allocated interrupts in a pool and add allocation abstraction
logic to make the code more flexible. Keep per-interrupt information in
the ice_q_vector structure, which makes ice_vsi::base_vector redundant.
As a result, a few functions can be removed as well.
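
For illustration only (this sketch is not part of the commit): the change replaces base-plus-offset IRQ lookups with the per-vector msi_map carried in ice_q_vector, e.g. in the aRFS rmap setup changed below.

/* Before: Linux IRQ number derived from the VSI's reserved base vector. */
int irq_num = pci_irq_vector(pf->pdev, vsi->base_vector + i);

/* After: each q_vector carries its own msi_map with MSI-X index and virq. */
int irq_num = vsi->q_vectors[i]->irq.virq;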

Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
Signed-off-by: Piotr Raczynski <piotr.raczynski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Piotr Raczynski authored and Tony Nguyen committed May 16, 2023
1 parent 524012c commit 4aad533
Showing 13 changed files with 165 additions and 282 deletions.
11 changes: 3 additions & 8 deletions drivers/net/ethernet/intel/ice/ice.h
@@ -105,10 +105,6 @@
#define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS 256
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_RES_RDMA_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX 0xffff

#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
@@ -349,7 +345,6 @@ struct ice_vsi {
u32 rx_buf_failed;
u32 rx_page_failed;
u16 num_q_vectors;
u16 base_vector; /* IRQ base for OS reserved vectors */
enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
@@ -480,6 +475,7 @@ struct ice_q_vector {
char name[ICE_INT_NAME_STR_LEN];

u16 total_events; /* net_dim(): number of interrupts processed */
struct msi_map irq;
} ____cacheline_internodealigned_in_smp;

enum ice_pf_flags {
@@ -584,8 +580,7 @@ struct ice_pf {

u32 hw_csum_rx_error;
u32 oicr_err_reg;
u16 oicr_idx; /* Other interrupt cause MSIX vector index */
u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
u16 num_lan_msix; /* Total MSIX vectors for base driver */
@@ -671,7 +666,7 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
struct ice_q_vector *q_vector)
{
u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
((struct ice_pf *)hw->back)->oicr_idx;
((struct ice_pf *)hw->back)->oicr_irq.index;
int itr = ICE_ITR_NONE;
u32 val;
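
For reference, struct msi_map used above is the generic MSI mapping type provided by the core interrupt code (include/linux/msi_api.h since v6.2); the definition below is quoted from memory as a reading aid and should be verified against the target tree.

struct msi_map {
	int	index;	/* MSI(-X) index within the device */
	int	virq;	/* Linux interrupt number */
};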

5 changes: 2 additions & 3 deletions drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -596,7 +596,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
struct ice_pf *pf;
int base_idx, i;
int i;

if (!vsi || vsi->type != ICE_VSI_PF)
return 0;
@@ -613,10 +613,9 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
if (unlikely(!netdev->rx_cpu_rmap))
return -EINVAL;

base_idx = vsi->base_vector;
ice_for_each_q_vector(vsi, i)
if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
pci_irq_vector(pf->pdev, base_idx + i))) {
vsi->q_vectors[i]->irq.virq)) {
ice_free_cpu_rx_rmap(vsi);
return -EINVAL;
}
50 changes: 46 additions & 4 deletions drivers/net/ethernet/intel/ice/ice_base.c
@@ -103,10 +103,10 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
struct ice_pf *pf = vsi->back;
struct ice_q_vector *q_vector;
int err;

/* allocate q_vector */
q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
GFP_KERNEL);
q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
if (!q_vector)
return -ENOMEM;

@@ -118,9 +118,34 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->rx.itr_mode = ITR_DYNAMIC;
q_vector->tx.type = ICE_TX_CONTAINER;
q_vector->rx.type = ICE_RX_CONTAINER;
q_vector->irq.index = -ENOENT;

if (vsi->type == ICE_VSI_VF)
if (vsi->type == ICE_VSI_VF) {
q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector);
goto out;
} else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);

if (ctrl_vsi) {
if (unlikely(!ctrl_vsi->q_vectors)) {
err = -ENOENT;
goto err_free_q_vector;
}

q_vector->irq = ctrl_vsi->q_vectors[0]->irq;
goto skip_alloc;
}
}

q_vector->irq = ice_alloc_irq(pf);
if (q_vector->irq.index < 0) {
err = -ENOMEM;
goto err_free_q_vector;
}

skip_alloc:
q_vector->reg_idx = q_vector->irq.index;

/* only set affinity_mask if the CPU is online */
if (cpu_online(v_idx))
cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
@@ -137,6 +162,11 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
vsi->q_vectors[v_idx] = q_vector;

return 0;

err_free_q_vector:
kfree(q_vector);

return err;
}

/**
@@ -168,7 +198,19 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
if (vsi->netdev)
netif_napi_del(&q_vector->napi);

devm_kfree(dev, q_vector);
/* release MSIX interrupt if q_vector had interrupt allocated */
if (q_vector->irq.index < 0)
goto free_q_vector;

/* only free last VF ctrl vsi interrupt */
if (vsi->type == ICE_VSI_CTRL && vsi->vf &&
ice_get_vf_ctrl_vsi(pf, vsi))
goto free_q_vector;

ice_free_irq(pf, q_vector->irq);

free_q_vector:
kfree(q_vector);
vsi->q_vectors[v_idx] = NULL;
}
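
The virq stored in q_vector->irq above is what the rest of the driver hands to the request_irq() family; those hunks (e.g. in ice_main.c) are not part of this excerpt, so the sketch below is only an approximation and the handler name is assumed.

err = devm_request_irq(ice_pf_to_dev(pf), q_vector->irq.virq,
		       ice_msix_clean_rings, 0, q_vector->name, q_vector);
if (err)
	dev_err(ice_pf_to_dev(pf), "MSIX request_irq failed, error: %d\n", err);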

2 changes: 1 addition & 1 deletion drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -956,7 +956,7 @@ static u64 ice_intr_test(struct net_device *netdev)

netdev_info(netdev, "interrupt test\n");

wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_idx),
wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_irq.index),
GLINT_DYN_CTL_SW_ITR_INDX_M |
GLINT_DYN_CTL_INTENA_MSK_M |
GLINT_DYN_CTL_SWINT_TRIG_M);
45 changes: 23 additions & 22 deletions drivers/net/ethernet/intel/ice/ice_idc.c
@@ -229,38 +229,33 @@ void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
EXPORT_SYMBOL_GPL(ice_get_qos_params);

/**
* ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver
* ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver
* @pf: board private structure to initialize
*/
static int ice_reserve_rdma_qvector(struct ice_pf *pf)
static int ice_alloc_rdma_qvectors(struct ice_pf *pf)
{
if (ice_is_rdma_ena(pf)) {
int index, i;

index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
ICE_RES_RDMA_VEC_ID);
if (index < 0)
return index;
int i;

pf->msix_entries = kcalloc(pf->num_rdma_msix,
sizeof(*pf->msix_entries),
GFP_KERNEL);
if (!pf->msix_entries) {
ice_free_res(pf->irq_tracker, pf->rdma_base_vector,
ICE_RES_RDMA_VEC_ID);
if (!pf->msix_entries)
return -ENOMEM;
}

pf->num_avail_sw_msix -= pf->num_rdma_msix;

/* RDMA is the only user of pf->msix_entries array */
pf->rdma_base_vector = 0;

for (i = 0; i < pf->num_rdma_msix; i++, index++) {
for (i = 0; i < pf->num_rdma_msix; i++) {
struct msix_entry *entry = &pf->msix_entries[i];
struct msi_map map;

entry->entry = index;
entry->vector = pci_irq_vector(pf->pdev, index);
map = ice_alloc_irq(pf);
if (map.index < 0)
break;

entry->entry = map.index;
entry->vector = map.virq;
}
}
return 0;
@@ -272,15 +267,21 @@ static int ice_reserve_rdma_qvector(struct ice_pf *pf)
*/
static void ice_free_rdma_qvector(struct ice_pf *pf)
{
int i;

if (!pf->msix_entries)
return;

for (i = 0; i < pf->num_rdma_msix; i++) {
struct msi_map map;

map.index = pf->msix_entries[i].entry;
map.virq = pf->msix_entries[i].vector;
ice_free_irq(pf, map);
}

kfree(pf->msix_entries);
pf->msix_entries = NULL;

pf->num_avail_sw_msix -= pf->num_rdma_msix;
ice_free_res(pf->irq_tracker, pf->rdma_base_vector,
ICE_RES_RDMA_VEC_ID);
}

/**
@@ -382,7 +383,7 @@ int ice_init_rdma(struct ice_pf *pf)
}

/* Reserve vector resources */
ret = ice_reserve_rdma_qvector(pf);
ret = ice_alloc_rdma_qvectors(pf);
if (ret < 0) {
dev_err(dev, "failed to reserve vectors for RDMA\n");
goto err_reserve_rdma_qvector;
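
In short, each RDMA msix_entry is now backed by an individually tracked interrupt; the mapping between the two structures, as set up in the allocation loop above, is simply:

entry->entry  = map.index;	/* MSI-X table index within the device */
entry->vector = map.virq;	/* Linux IRQ number consumed by the RDMA auxiliary driver */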
46 changes: 45 additions & 1 deletion drivers/net/ethernet/intel/ice/ice_irq.c
@@ -194,9 +194,53 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
}

/* populate SW interrupts pool with number of OS granted IRQs. */
pf->num_avail_sw_msix = (u16)vectors;
pf->irq_tracker->num_entries = (u16)vectors;
pf->irq_tracker->end = pf->irq_tracker->num_entries;

return 0;
}

/**
* ice_alloc_irq - Allocate new interrupt vector
* @pf: board private structure
*
* Allocate a new interrupt vector for a given owner id. Return a
* struct msi_map with the interrupt details and track the allocated
* interrupt appropriately.
*
* This function mimics individual interrupt allocation, even though
* interrupts are actually already allocated with pci_alloc_irq_vectors.
* Individual allocation helps to track interrupts and simplifies
* interrupt-related handling.
*
* On failure, return a map with a negative .index. The caller is
* expected to check the returned map index.
*/
struct msi_map ice_alloc_irq(struct ice_pf *pf)
{
struct msi_map map = { .index = -ENOENT };
int entry;

entry = ice_get_res(pf, pf->irq_tracker);
if (entry < 0)
return map;

map.index = entry;
map.virq = pci_irq_vector(pf->pdev, map.index);

return map;
}

/**
* ice_free_irq - Free interrupt vector
* @pf: board private structure
* @map: map with interrupt details
*
* Remove allocated interrupt from the interrupt tracker
*/
void ice_free_irq(struct ice_pf *pf, struct msi_map map)
{
ice_free_res(pf->irq_tracker, map.index);
}
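
Taken together, the calling pattern these two helpers establish (mirroring their use in ice_base.c above) is roughly:

struct msi_map map = ice_alloc_irq(pf);

if (map.index < 0)
	return -ENOMEM;		/* tracker exhausted, nothing was reserved */

/* ... program registers with map.index, request the IRQ via map.virq ... */

ice_free_irq(pf, map);		/* return the entry to the tracker */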
3 changes: 3 additions & 0 deletions drivers/net/ethernet/intel/ice/ice_irq.h
@@ -7,4 +7,7 @@
int ice_init_interrupt_scheme(struct ice_pf *pf);
void ice_clear_interrupt_scheme(struct ice_pf *pf);

struct msi_map ice_alloc_irq(struct ice_pf *pf);
void ice_free_irq(struct ice_pf *pf, struct msi_map map);

#endif