mariux64 / linux
drivers/net/ethernet/intel/ice/ice_idc.c (commit b794eec) · 345 lines (290 loc) · 7.54 KB
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

/* Inter-Driver Communication */
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
 * @pf: pointer to PF struct
 *
 * This function has to be called with a device_lock on the
 * pf->adev.dev to avoid race conditions.
 */
static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	adev = pf->adev;
	if (!adev || !adev->dev.driver)
		return NULL;

	return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
			    adrv.driver);
}

/**
 * ice_send_event_to_aux - send event to RDMA AUX driver
 * @pf: pointer to PF struct
 * @event: event struct
 */
void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
	struct iidc_auxiliary_drv *iadrv;

	if (!pf->adev)
		return;

	device_lock(&pf->adev->dev);
	iadrv = ice_get_auxiliary_drv(pf);
	if (iadrv && iadrv->event_handler)
		iadrv->event_handler(pf, event);
	device_unlock(&pf->adev->dev);
}

/**
 * ice_find_vsi - Find the VSI from VSI ID
 * @pf: The PF pointer to search in
 * @vsi_num: The VSI ID to search for
 */
static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
{
	int i;

	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
			return pf->vsi[i];
	return NULL;
}

/**
 * ice_add_rdma_qset - Add Leaf Node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be allocated
 */
int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
	struct ice_vsi *vsi;
	struct device *dev;
	u32 qset_teid;
	u16 qs_handle;
	int status;
	int i;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EINVAL;

	vsi = ice_get_main_vsi(pf);
	if (!vsi) {
		dev_err(dev, "RDMA QSet invalid VSI\n");
		return -EINVAL;
	}

	ice_for_each_traffic_class(i)
		max_rdmaqs[i] = 0;

	max_rdmaqs[qset->tc]++;
	qs_handle = qset->qs_handle;

	status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				  max_rdmaqs);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset config\n");
		return status;
	}

	status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
				       &qs_handle, 1, &qset_teid);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset enable\n");
		return status;
	}
	vsi->qset_handle[qset->tc] = qset->qs_handle;
	qset->teid = qset_teid;

	return 0;
}
EXPORT_SYMBOL_GPL(ice_add_rdma_qset);

/**
 * ice_del_rdma_qset - Delete leaf node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be freed
 */
int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	struct ice_vsi *vsi;
	u32 teid;
	u16 q_id;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	vsi = ice_find_vsi(pf, qset->vport_id);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
		return -EINVAL;
	}

	q_id = qset->qs_handle;
	teid = qset->teid;

	vsi->qset_handle[qset->tc] = 0;

	return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);

/**
 * ice_rdma_request_reset - accept request from RDMA to perform a reset
 * @pf: struct for PF
 * @reset_type: type of reset
 */
int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
{
	enum ice_reset_req reset;

	if (WARN_ON(!pf))
		return -EINVAL;

	switch (reset_type) {
	case IIDC_PFR:
		reset = ICE_RESET_PFR;
		break;
	case IIDC_CORER:
		reset = ICE_RESET_CORER;
		break;
	case IIDC_GLOBR:
		reset = ICE_RESET_GLOBR;
		break;
	default:
		dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
		return -EINVAL;
	}

	return ice_schedule_reset(pf, reset);
}
EXPORT_SYMBOL_GPL(ice_rdma_request_reset);

/**
 * ice_rdma_update_vsi_filter - update main VSI filters for RDMA
 * @pf: pointer to struct for PF
 * @vsi_id: VSI HW idx to update filter on
 * @enable: bool whether to enable or disable filters
 */
int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
{
	struct ice_vsi *vsi;
	int status;

	if (WARN_ON(!pf))
		return -EINVAL;

	vsi = ice_find_vsi(pf, vsi_id);
	if (!vsi)
		return -EINVAL;

	status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
			enable ? "en" : "dis");
	} else {
		if (enable)
			vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
		else
			vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	}

	return status;
}
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);

/**
 * ice_get_qos_params - parse QoS params for RDMA consumption
 * @pf: pointer to PF struct
 * @qos: set of QoS values
 */
void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
{
	struct ice_dcbx_cfg *dcbx_cfg;
	unsigned int i;
	u32 up2tc;

	dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);

	qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
	for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
		qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];

	qos->pfc_mode = dcbx_cfg->pfc_mode;
	if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
		for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
			qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);

/**
 * ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver
 * @pf: board private structure to initialize
 */
static int ice_reserve_rdma_qvector(struct ice_pf *pf)
{
	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
		int index;

		index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
				    ICE_RES_RDMA_VEC_ID);
		if (index < 0)
			return index;
		pf->num_avail_sw_msix -= pf->num_rdma_msix;
		pf->rdma_base_vector = (u16)index;
	}

	return 0;
}

/**
 * ice_adev_release - function to be mapped to AUX dev's release op
 * @dev: pointer to device to free
 */
static void ice_adev_release(struct device *dev)
{
	struct iidc_auxiliary_dev *iadev;

	iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
	kfree(iadev);
}

/**
 * ice_plug_aux_dev - allocate and register AUX device
 * @pf: pointer to pf struct
 */
int ice_plug_aux_dev(struct ice_pf *pf)
{
	struct iidc_auxiliary_dev *iadev;
	struct auxiliary_device *adev;
	int ret;

	/* if this PF doesn't support a technology that requires auxiliary
	 * devices, then gracefully exit
	 */
	if (!ice_is_aux_ena(pf))
		return 0;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev)
		return -ENOMEM;

	adev = &iadev->adev;
	pf->adev = adev;
	iadev->pf = pf;

	adev->id = pf->aux_idx;
	adev->dev.release = ice_adev_release;
	adev->dev.parent = &pf->pdev->dev;
	adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ?
		     "roce" : "iwarp";

	ret = auxiliary_device_init(adev);
	if (ret) {
		pf->adev = NULL;
		kfree(iadev);
		return ret;
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		pf->adev = NULL;
		auxiliary_device_uninit(adev);
		return ret;
	}

	return 0;
}

/* ice_unplug_aux_dev - unregister and free AUX device
 * @pf: pointer to pf struct
 */
void ice_unplug_aux_dev(struct ice_pf *pf)
{
	if (!pf->adev)
		return;

	auxiliary_device_delete(pf->adev);
	auxiliary_device_uninit(pf->adev);
	pf->adev = NULL;
}

/**
 * ice_init_rdma - initializes PF for RDMA use
 * @pf: ptr to ice_pf
 */
int ice_init_rdma(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	int ret;

	/* Reserve vector resources */
	ret = ice_reserve_rdma_qvector(pf);
	if (ret < 0) {
		dev_err(dev, "failed to reserve vectors for RDMA\n");
		return ret;
	}

	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
	return ice_plug_aux_dev(pf);
}