// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"

#define ICE_PF_RESET_WAIT_COUNT	300

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_is_e823
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer. Interpret that buffer as a
 * "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_discover_dev_caps is expected to be called before this
 * function is called.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
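
/* Illustrative use of ice_aq_manage_mac_read() (a sketch; it mirrors the
 * call made from ice_init_hw() later in this file). Since a single port can
 * report up to two (LAN and WoL) addresses, the response buffer is sized
 * for two entries:
 *
 *	u8 buf[2 * sizeof(struct ice_aqc_manage_mac_read_resp)];
 *
 *	if (!ice_aq_manage_mac_read(hw, buf, sizeof(buf), NULL))
 *		// hw->port_info->mac.lan_addr and .perm_addr now hold
 *		// the LAN MAC address parsed from the response
 */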

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
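
/* Illustrative call (a sketch; ice_init_hw() below does the same but
 * allocates pcaps from the heap because the structure is large). Querying
 * with ICE_AQC_REPORT_TOPO_CAP_MEDIA also caches phy_type_low/high and the
 * module type in pi->phy on success:
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = {};
 *
 *	if (!ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *				 &pcaps, NULL))
 *		// pi->phy.phy_type_low/high now reflect the reported media
 */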

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
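
/* Illustrative link query (a sketch): enable link status events while
 * reading the current state, optionally copying the result out. The
 * ICE_AQ_LINK_UP flag tested here is the link-up bit from
 * ice_adminq_cmd.h:
 *
 *	struct ice_link_status link;
 *
 *	if (!ice_aq_get_link_info(pi, true, &link, NULL) &&
 *	    (link.link_info & ICE_AQ_LINK_UP))
 *		// link is up; link.link_speed holds the negotiated speed
 */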

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
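
/* Illustrative use (a sketch): ice_init_hw() below enables jumbo frame
 * support by passing the maximum supported frame size:
 *
 *	if (ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL))
 *		// MAC config failed; the caller decides whether it is fatal
 */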

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static int ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	__le16 *config;
	int status;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = kzalloc(size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	kfree(config);

	return status;
}

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;
	int status = 0;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return -ENOMEM;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
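
/* Illustrative FW-logging bring-up (a sketch based on the kernel-doc above;
 * module index 0 and the "enable everything" value are placeholders, not a
 * recommended configuration):
 *
 *	hw->fw_log.cq_en = true;	// emit FW logs via the Rx CQ
 *	hw->fw_log.evnts[0].cfg = ICE_AQC_FW_LOG_EN_M >> ICE_AQC_FW_LOG_EN_S;
 *	if (ice_cfg_fw_log(hw, true))
 *		// not fatal; the driver can continue without FW logging
 */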

/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	u16 mac_buf_len;
	void *mac_buf;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing, since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}
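
/* Worked example for the grst_timeout computation above: if the GRSTDEL
 * field reads 30, the loop polls for (30 + 10) iterations of 100 ms each,
 * i.e. 3 s of configured reset delay plus the extra 1 s allowed for
 * outstanding AQ commands.
 */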

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};
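
/* Reading the table above: each ICE_CTX_STORE() entry packs one sparse
 * struct field into the dense queue-context buffer at a bit offset. For
 * example, qlen (width 13, LSB 89) occupies bits 89..101 of the buffer,
 * i.e. it starts at bit 1 of byte 11 (89 = 11 * 8 + 1).
 */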

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
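
/* Illustrative use (a sketch; ring_dma, ring_count and rxq_index are
 * placeholders): callers fill a sparse context and let this helper pack and
 * program it. The base address is programmed in 128-byte units, hence the
 * shift by 7:
 *
 *	struct ice_rlan_ctx ctx = {};
 *
 *	ctx.base = ring_dma >> 7;
 *	ctx.qlen = ring_count;
 *	ice_write_rxq_ctx(hw, &ctx, rxq_index);
 */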

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
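
/* Illustrative sideband register read (a sketch; dest_dev and reg_addr are
 * placeholders). Per the logic above, a zero opcode denotes a read, so the
 * value comes back in msg.data via the completion:
 *
 *	struct ice_sbq_msg_input msg = {};
 *
 *	msg.dest_dev = dest_dev;
 *	msg.opcode = 0;			// 0 == read; non-zero == write
 *	msg.msg_addr_low = lower_16_bits(reg_addr);
 *	msg.msg_addr_high = reg_addr >> 16;
 *	if (!ice_sbq_rw_reg(hw, &msg))
 *		// msg.data holds the value read back
 */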
1573
1574 /* FW Admin Queue command wrappers */
1575
1576 /* Software lock/mutex that is meant to be held while the Global Config Lock
1577 * in firmware is acquired by the software to prevent most (but not all) types
1578 * of AQ commands from being sent to FW
1579 */
1580 DEFINE_MUTEX(ice_global_cfg_lock_sw);
1581
1582 /**
1583 * ice_should_retry_sq_send_cmd
1584 * @opcode: AQ opcode
1585 *
1586 * Decide if we should retry the send command routine for the ATQ, depending
1587 * on the opcode.
1588 */
ice_should_retry_sq_send_cmd(u16 opcode)1589 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1590 {
1591 switch (opcode) {
1592 case ice_aqc_opc_get_link_topo:
1593 case ice_aqc_opc_lldp_stop:
1594 case ice_aqc_opc_lldp_start:
1595 case ice_aqc_opc_lldp_filter_ctrl:
1596 return true;
1597 }
1598
1599 return false;
1600 }
1601
1602 /**
1603 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1604 * @hw: pointer to the HW struct
1605 * @cq: pointer to the specific Control queue
1606 * @desc: prefilled descriptor describing the command
1607 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1608 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1609 * @cd: pointer to command details structure
1610 *
1611 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
1612 * Queue if the EBUSY AQ error is returned.
1613 */
1614 static int
ice_sq_send_cmd_retry(struct ice_hw * hw,struct ice_ctl_q_info * cq,struct ice_aq_desc * desc,void * buf,u16 buf_size,struct ice_sq_cd * cd)1615 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1616 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1617 struct ice_sq_cd *cd)
1618 {
1619 struct ice_aq_desc desc_cpy;
1620 bool is_cmd_for_retry;
1621 u8 idx = 0;
1622 u16 opcode;
1623 int status;
1624
1625 opcode = le16_to_cpu(desc->opcode);
1626 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1627 memset(&desc_cpy, 0, sizeof(desc_cpy));
1628
1629 if (is_cmd_for_retry) {
1630 /* All retryable cmds are direct, without buf. */
1631 WARN_ON(buf);
1632
1633 memcpy(&desc_cpy, desc, sizeof(desc_cpy));
1634 }
1635
1636 do {
1637 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1638
1639 if (!is_cmd_for_retry || !status ||
1640 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1641 break;
1642
1643 memcpy(desc, &desc_cpy, sizeof(desc_cpy));
1644
1645 msleep(ICE_SQ_SEND_DELAY_TIME_MS);
1646
1647 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1648
1649 return status;
1650 }
1651
1652 /**
1653 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1654 * @hw: pointer to the HW struct
1655 * @desc: descriptor describing the command
1656 * @buf: buffer to use for indirect commands (NULL for direct commands)
1657 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1658 * @cd: pointer to command details structure
1659 *
1660 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1661 */
1662 int
ice_aq_send_cmd(struct ice_hw * hw,struct ice_aq_desc * desc,void * buf,u16 buf_size,struct ice_sq_cd * cd)1663 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1664 u16 buf_size, struct ice_sq_cd *cd)
1665 {
1666 struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1667 bool lock_acquired = false;
1668 int status;
1669
1670 /* When a package download is in process (i.e. when the firmware's
1671 * Global Configuration Lock resource is held), only the Download
1672 * Package, Get Version, Get Package Info List, Upload Section,
1673 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
1674 * Add Recipe, Set Recipes to Profile Association, Get Recipe, and Get
1675 * Recipes to Profile Association, and Release Resource (with resource
1676 * ID set to Global Config Lock) AdminQ commands are allowed; all others
1677 * must block until the package download completes and the Global Config
1678 * Lock is released. See also ice_acquire_global_cfg_lock().
1679 */
1680 switch (le16_to_cpu(desc->opcode)) {
1681 case ice_aqc_opc_download_pkg:
1682 case ice_aqc_opc_get_pkg_info_list:
1683 case ice_aqc_opc_get_ver:
1684 case ice_aqc_opc_upload_section:
1685 case ice_aqc_opc_update_pkg:
1686 case ice_aqc_opc_set_port_params:
1687 case ice_aqc_opc_get_vlan_mode_parameters:
1688 case ice_aqc_opc_set_vlan_mode_parameters:
1689 case ice_aqc_opc_add_recipe:
1690 case ice_aqc_opc_recipe_to_profile:
1691 case ice_aqc_opc_get_recipe:
1692 case ice_aqc_opc_get_recipe_to_profile:
1693 break;
1694 case ice_aqc_opc_release_res:
1695 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1696 break;
1697 fallthrough;
1698 default:
1699 mutex_lock(&ice_global_cfg_lock_sw);
1700 lock_acquired = true;
1701 break;
1702 }
1703
1704 status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1705 if (lock_acquired)
1706 mutex_unlock(&ice_global_cfg_lock_sw);
1707
1708 return status;
1709 }
1710
1711 /**
1712 * ice_aq_get_fw_ver
1713 * @hw: pointer to the HW struct
1714 * @cd: pointer to command details structure or NULL
1715 *
1716 * Get the firmware version (0x0001) from the admin queue commands
1717 */
1718 int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1719 {
1720 struct ice_aqc_get_ver *resp;
1721 struct ice_aq_desc desc;
1722 int status;
1723
1724 resp = &desc.params.get_ver;
1725
1726 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1727
1728 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1729
1730 if (!status) {
1731 hw->fw_branch = resp->fw_branch;
1732 hw->fw_maj_ver = resp->fw_major;
1733 hw->fw_min_ver = resp->fw_minor;
1734 hw->fw_patch = resp->fw_patch;
1735 hw->fw_build = le32_to_cpu(resp->fw_build);
1736 hw->api_branch = resp->api_branch;
1737 hw->api_maj_ver = resp->api_major;
1738 hw->api_min_ver = resp->api_minor;
1739 hw->api_patch = resp->api_patch;
1740 }
1741
1742 return status;
1743 }
1744
1745 /**
1746 * ice_aq_send_driver_ver
1747 * @hw: pointer to the HW struct
1748 * @dv: driver's major, minor version
1749 * @cd: pointer to command details structure or NULL
1750 *
1751 * Send the driver version (0x0002) to the firmware
1752 */
1753 int
1754 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1755 struct ice_sq_cd *cd)
1756 {
1757 struct ice_aqc_driver_ver *cmd;
1758 struct ice_aq_desc desc;
1759 u16 len;
1760
1761 cmd = &desc.params.driver_ver;
1762
1763 if (!dv)
1764 return -EINVAL;
1765
1766 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1767
1768 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1769 cmd->major_ver = dv->major_ver;
1770 cmd->minor_ver = dv->minor_ver;
1771 cmd->build_ver = dv->build_ver;
1772 cmd->subbuild_ver = dv->subbuild_ver;
1773
1774 len = 0;
1775 while (len < sizeof(dv->driver_string) &&
1776 isascii(dv->driver_string[len]) && dv->driver_string[len])
1777 len++;
1778
1779 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1780 }
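
/* Illustrative sketch (not part of the original file): how a caller might
 * fill struct ice_driver_ver before reporting it. The version values and
 * string here are hypothetical.
 *
 *	static const char ver_str[] = "example 1.0";
 *	struct ice_driver_ver dv = { .major_ver = 1, .minor_ver = 0 };
 *
 *	BUILD_BUG_ON(sizeof(ver_str) > sizeof(dv.driver_string));
 *	memcpy(dv.driver_string, ver_str, sizeof(ver_str));
 *	status = ice_aq_send_driver_ver(hw, &dv, NULL);
 */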
1781
1782 /**
1783 * ice_aq_q_shutdown
1784 * @hw: pointer to the HW struct
1785 * @unloading: is the driver unloading itself
1786 *
1787 * Tell the Firmware that we're shutting down the AdminQ and whether
1788 * or not the driver is unloading as well (0x0003).
1789 */
1790 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1791 {
1792 struct ice_aqc_q_shutdown *cmd;
1793 struct ice_aq_desc desc;
1794
1795 cmd = &desc.params.q_shutdown;
1796
1797 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1798
1799 if (unloading)
1800 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1801
1802 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1803 }
1804
1805 /**
1806 * ice_aq_req_res
1807 * @hw: pointer to the HW struct
1808 * @res: resource ID
1809 * @access: access type
1810 * @sdp_number: resource number
1811 * @timeout: the maximum time in ms that the driver may hold the resource
1812 * @cd: pointer to command details structure or NULL
1813 *
1814 * Requests a common resource using the admin queue command (0x0008).
1815 * When attempting to acquire the Global Config Lock, the driver can
1816 * learn of three states:
1817 * 1) 0 - acquired lock, and can perform download package
1818 * 2) -EIO - did not get lock, driver should fail to load
1819 * 3) -EALREADY - did not get lock, but another driver has
1820 * successfully downloaded the package; the driver does
1821 * not have to download the package and can continue
1822 * loading
1823 *
1824 * Note that if the caller is in an acquire-lock, perform-action, release-lock
1825 * phase of operation, the FW may detect a timeout and issue a CORER. In this
1826 * case, the driver will receive a CORER interrupt and will have to determine
1827 * its cause. The calling thread handling this flow will likely get an error
1828 * propagated back to it indicating that the Download Package, Update Package,
1829 * or Release Resource AQ command timed out.
1830 */
1831 static int
1832 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1833 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1834 struct ice_sq_cd *cd)
1835 {
1836 struct ice_aqc_req_res *cmd_resp;
1837 struct ice_aq_desc desc;
1838 int status;
1839
1840 cmd_resp = &desc.params.res_owner;
1841
1842 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1843
1844 cmd_resp->res_id = cpu_to_le16(res);
1845 cmd_resp->access_type = cpu_to_le16(access);
1846 cmd_resp->res_number = cpu_to_le32(sdp_number);
1847 cmd_resp->timeout = cpu_to_le32(*timeout);
1848 *timeout = 0;
1849
1850 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1851
1852 /* The completion specifies the maximum time in ms that the driver
1853 * may hold the resource in the Timeout field.
1854 */
1855
1856 /* Global config lock response utilizes an additional status field.
1857 *
1858 * If the Global config lock resource is held by some other driver, the
1859 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1860 * and the timeout field indicates the maximum time the current owner
1861 * of the resource has to free it.
1862 */
1863 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1864 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1865 *timeout = le32_to_cpu(cmd_resp->timeout);
1866 return 0;
1867 } else if (le16_to_cpu(cmd_resp->status) ==
1868 ICE_AQ_RES_GLBL_IN_PROG) {
1869 *timeout = le32_to_cpu(cmd_resp->timeout);
1870 return -EIO;
1871 } else if (le16_to_cpu(cmd_resp->status) ==
1872 ICE_AQ_RES_GLBL_DONE) {
1873 return -EALREADY;
1874 }
1875
1876 /* invalid FW response, force a timeout immediately */
1877 *timeout = 0;
1878 return -EIO;
1879 }
1880
1881 /* If the resource is held by some other driver, the command completes
1882 * with a busy return value and the timeout field indicates the maximum
1883 * time the current owner of the resource has to free it.
1884 */
1885 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1886 *timeout = le32_to_cpu(cmd_resp->timeout);
1887
1888 return status;
1889 }
1890
1891 /**
1892 * ice_aq_release_res
1893 * @hw: pointer to the HW struct
1894 * @res: resource ID
1895 * @sdp_number: resource number
1896 * @cd: pointer to command details structure or NULL
1897 *
1898 * Release a common resource using the admin queue command (0x0009).
1899 */
1900 static int
1901 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1902 struct ice_sq_cd *cd)
1903 {
1904 struct ice_aqc_req_res *cmd;
1905 struct ice_aq_desc desc;
1906
1907 cmd = &desc.params.res_owner;
1908
1909 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1910
1911 cmd->res_id = cpu_to_le16(res);
1912 cmd->res_number = cpu_to_le32(sdp_number);
1913
1914 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1915 }
1916
1917 /**
1918 * ice_acquire_res
1919 * @hw: pointer to the HW structure
1920 * @res: resource ID
1921 * @access: access type (read or write)
1922 * @timeout: timeout in milliseconds
1923 *
1924 * This function will attempt to acquire the ownership of a resource.
1925 */
1926 int
1927 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1928 enum ice_aq_res_access_type access, u32 timeout)
1929 {
1930 #define ICE_RES_POLLING_DELAY_MS 10
1931 u32 delay = ICE_RES_POLLING_DELAY_MS;
1932 u32 time_left = timeout;
1933 int status;
1934
1935 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1936
1937 /* A return code of -EALREADY means that another driver has
1938 * previously acquired the resource and performed any necessary updates;
1939 * in this case the caller does not obtain the resource and has no
1940 * further work to do.
1941 */
1942 if (status == -EALREADY)
1943 goto ice_acquire_res_exit;
1944
1945 if (status)
1946 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1947
1948 	/* If necessary, poll until the current lock owner times out */
1949 timeout = time_left;
1950 while (status && timeout && time_left) {
1951 mdelay(delay);
1952 timeout = (timeout > delay) ? timeout - delay : 0;
1953 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1954
1955 if (status == -EALREADY)
1956 /* lock free, but no work to do */
1957 break;
1958
1959 if (!status)
1960 /* lock acquired */
1961 break;
1962 }
1963 if (status && status != -EALREADY)
1964 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1965
1966 ice_acquire_res_exit:
1967 if (status == -EALREADY) {
1968 if (access == ICE_RES_WRITE)
1969 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1970 else
1971 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
1972 }
1973 return status;
1974 }
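
/* Illustrative sketch (not part of the original file): acquiring the Global
 * Config Lock around a package download and treating -EALREADY as "nothing
 * to do". ICE_GLOBAL_CFG_LOCK_TIMEOUT is assumed to be defined elsewhere in
 * the driver; ice_example_download_pkg() is a hypothetical stand-in for the
 * real download flow.
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
 *				 ICE_RES_WRITE, ICE_GLOBAL_CFG_LOCK_TIMEOUT);
 *	if (!status) {
 *		status = ice_example_download_pkg(hw);
 *		ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 *	} else if (status == -EALREADY) {
 *		status = 0;	// another PF already downloaded the package
 *	}
 */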
1975
1976 /**
1977 * ice_release_res
1978 * @hw: pointer to the HW structure
1979 * @res: resource ID
1980 *
1981 * This function will release a resource using the proper Admin Command.
1982 */
1983 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1984 {
1985 unsigned long timeout;
1986 int status;
1987
1988 /* there are some rare cases when trying to release the resource
1989 * results in an admin queue timeout, so handle them correctly
1990 */
1991 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT;
1992 do {
1993 status = ice_aq_release_res(hw, res, 0, NULL);
1994 if (status != -EIO)
1995 break;
1996 usleep_range(1000, 2000);
1997 } while (time_before(jiffies, timeout));
1998 }
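
/* Illustrative sketch (not part of the original file): the usual
 * acquire/use/release pattern for a shared HW resource. ICE_NVM_RES_ID is
 * assumed from enum ice_aq_res_ids, and the 3000 ms timeout is an arbitrary
 * example value.
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
 *	if (status)
 *		return status;
 *	// ... access the NVM while owning the resource ...
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */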
1999
2000 /**
2001 * ice_aq_alloc_free_res - command to allocate/free resources
2002 * @hw: pointer to the HW struct
2003 * @buf: Indirect buffer to hold data parameters and response
2004 * @buf_size: size of buffer for indirect commands
2005 * @opc: pass in the command opcode
2006 *
2007 * Helper function to allocate/free resources using the admin queue commands
2008 */
2009 int ice_aq_alloc_free_res(struct ice_hw *hw,
2010 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
2011 enum ice_adminq_opc opc)
2012 {
2013 struct ice_aqc_alloc_free_res_cmd *cmd;
2014 struct ice_aq_desc desc;
2015
2016 cmd = &desc.params.sw_res_ctrl;
2017
2018 if (!buf || buf_size < flex_array_size(buf, elem, 1))
2019 return -EINVAL;
2020
2021 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2022
2023 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2024
2025 cmd->num_entries = cpu_to_le16(1);
2026
2027 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL);
2028 }
2029
2030 /**
2031 * ice_alloc_hw_res - allocate resource
2032 * @hw: pointer to the HW struct
2033 * @type: type of resource
2034 * @num: number of resources to allocate
2035 * @btm: allocate from bottom
2036 * @res: pointer to array that will receive the resources
2037 */
2038 int
2039 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
2040 {
2041 struct ice_aqc_alloc_free_res_elem *buf;
2042 u16 buf_len;
2043 int status;
2044
2045 buf_len = struct_size(buf, elem, num);
2046 buf = kzalloc(buf_len, GFP_KERNEL);
2047 if (!buf)
2048 return -ENOMEM;
2049
2050 /* Prepare buffer to allocate resource. */
2051 buf->num_elems = cpu_to_le16(num);
2052 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
2053 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
2054 if (btm)
2055 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
2056
2057 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
2058 if (status)
2059 goto ice_alloc_res_exit;
2060
2061 memcpy(res, buf->elem, sizeof(*buf->elem) * num);
2062
2063 ice_alloc_res_exit:
2064 kfree(buf);
2065 return status;
2066 }
2067
2068 /**
2069 * ice_free_hw_res - free allocated HW resource
2070 * @hw: pointer to the HW struct
2071 * @type: type of resource to free
2072 * @num: number of resources
2073 * @res: pointer to array that contains the resources to free
2074 */
2075 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
2076 {
2077 struct ice_aqc_alloc_free_res_elem *buf;
2078 u16 buf_len;
2079 int status;
2080
2081 buf_len = struct_size(buf, elem, num);
2082 buf = kzalloc(buf_len, GFP_KERNEL);
2083 if (!buf)
2084 return -ENOMEM;
2085
2086 /* Prepare buffer to free resource. */
2087 buf->num_elems = cpu_to_le16(num);
2088 buf->res_type = cpu_to_le16(type);
2089 memcpy(buf->elem, res, sizeof(*buf->elem) * num);
2090
2091 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
2092 if (status)
2093 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2094
2095 kfree(buf);
2096 return status;
2097 }
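
/* Illustrative sketch (not part of the original file): allocating a single
 * dedicated resource from the bottom of the pool and freeing it again. The
 * resource type ICE_AQC_RES_TYPE_VLAN_COUNTER is assumed from
 * ice_adminq_cmd.h.
 *
 *	u16 counter_id;
 *
 *	status = ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER, 1,
 *				  true, &counter_id);
 *	if (status)
 *		return status;
 *	// ... use counter_id ...
 *	ice_free_hw_res(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER, 1, &counter_id);
 */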
2098
2099 /**
2100 * ice_get_num_per_func - determine number of resources per PF
2101 * @hw: pointer to the HW structure
2102  * @max: value to be evenly split among all valid PFs
2103 *
2104 * Determine the number of valid functions by going through the bitmap returned
2105 * from parsing capabilities and use this to calculate the number of resources
2106 * per PF based on the max value passed in.
2107 */
2108 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2109 {
2110 u8 funcs;
2111
2112 #define ICE_CAPS_VALID_FUNCS_M 0xFF
2113 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
2114 ICE_CAPS_VALID_FUNCS_M);
2115
2116 if (!funcs)
2117 return 0;
2118
2119 return max / funcs;
2120 }
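
/* For example, if valid_functions reports 0xFF (eight enabled PFs) and the
 * device has 768 VSIs to split, each PF is guaranteed 768 / 8 = 96 VSIs.
 */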
2121
2122 /**
2123 * ice_parse_common_caps - parse common device/function capabilities
2124 * @hw: pointer to the HW struct
2125 * @caps: pointer to common capabilities structure
2126 * @elem: the capability element to parse
2127 * @prefix: message prefix for tracing capabilities
2128 *
2129 * Given a capability element, extract relevant details into the common
2130 * capability structure.
2131 *
2132 * Returns: true if the capability matches one of the common capability ids,
2133 * false otherwise.
2134 */
2135 static bool
2136 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2137 struct ice_aqc_list_caps_elem *elem, const char *prefix)
2138 {
2139 u32 logical_id = le32_to_cpu(elem->logical_id);
2140 u32 phys_id = le32_to_cpu(elem->phys_id);
2141 u32 number = le32_to_cpu(elem->number);
2142 u16 cap = le16_to_cpu(elem->cap);
2143 bool found = true;
2144
2145 switch (cap) {
2146 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2147 caps->valid_functions = number;
2148 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2149 caps->valid_functions);
2150 break;
2151 case ICE_AQC_CAPS_SRIOV:
2152 caps->sr_iov_1_1 = (number == 1);
2153 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2154 caps->sr_iov_1_1);
2155 break;
2156 case ICE_AQC_CAPS_DCB:
2157 caps->dcb = (number == 1);
2158 caps->active_tc_bitmap = logical_id;
2159 caps->maxtc = phys_id;
2160 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2161 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2162 caps->active_tc_bitmap);
2163 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2164 break;
2165 case ICE_AQC_CAPS_RSS:
2166 caps->rss_table_size = number;
2167 caps->rss_table_entry_width = logical_id;
2168 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2169 caps->rss_table_size);
2170 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2171 caps->rss_table_entry_width);
2172 break;
2173 case ICE_AQC_CAPS_RXQS:
2174 caps->num_rxq = number;
2175 caps->rxq_first_id = phys_id;
2176 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2177 caps->num_rxq);
2178 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2179 caps->rxq_first_id);
2180 break;
2181 case ICE_AQC_CAPS_TXQS:
2182 caps->num_txq = number;
2183 caps->txq_first_id = phys_id;
2184 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2185 caps->num_txq);
2186 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2187 caps->txq_first_id);
2188 break;
2189 case ICE_AQC_CAPS_MSIX:
2190 caps->num_msix_vectors = number;
2191 caps->msix_vector_first_id = phys_id;
2192 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2193 caps->num_msix_vectors);
2194 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2195 caps->msix_vector_first_id);
2196 break;
2197 case ICE_AQC_CAPS_PENDING_NVM_VER:
2198 caps->nvm_update_pending_nvm = true;
2199 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
2200 break;
2201 case ICE_AQC_CAPS_PENDING_OROM_VER:
2202 caps->nvm_update_pending_orom = true;
2203 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
2204 break;
2205 case ICE_AQC_CAPS_PENDING_NET_VER:
2206 caps->nvm_update_pending_netlist = true;
2207 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
2208 break;
2209 case ICE_AQC_CAPS_NVM_MGMT:
2210 		caps->nvm_unified_update =
2211 			!!(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT);
2213 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2214 caps->nvm_unified_update);
2215 break;
2216 case ICE_AQC_CAPS_RDMA:
2217 caps->rdma = (number == 1);
2218 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
2219 break;
2220 case ICE_AQC_CAPS_MAX_MTU:
2221 caps->max_mtu = number;
2222 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2223 prefix, caps->max_mtu);
2224 break;
2225 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2226 caps->pcie_reset_avoidance = (number > 0);
2227 ice_debug(hw, ICE_DBG_INIT,
2228 "%s: pcie_reset_avoidance = %d\n", prefix,
2229 caps->pcie_reset_avoidance);
2230 break;
2231 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2232 caps->reset_restrict_support = (number == 1);
2233 ice_debug(hw, ICE_DBG_INIT,
2234 "%s: reset_restrict_support = %d\n", prefix,
2235 caps->reset_restrict_support);
2236 break;
2237 case ICE_AQC_CAPS_FW_LAG_SUPPORT:
2238 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
2239 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
2240 prefix, caps->roce_lag);
2241 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
2242 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
2243 prefix, caps->sriov_lag);
2244 break;
2245 default:
2246 /* Not one of the recognized common capabilities */
2247 found = false;
2248 }
2249
2250 return found;
2251 }
2252
2253 /**
2254 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2255 * @hw: pointer to the HW structure
2256 * @caps: pointer to capabilities structure to fix
2257 *
2258 * Re-calculate the capabilities that are dependent on the number of physical
2259 * ports; i.e. some features are not supported or function differently on
2260 * devices with more than 4 ports.
2261 */
2262 static void
2263 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2264 {
2265 /* This assumes device capabilities are always scanned before function
2266 * capabilities during the initialization flow.
2267 */
2268 if (hw->dev_caps.num_funcs > 4) {
2269 /* Max 4 TCs per port */
2270 caps->maxtc = 4;
2271 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2272 caps->maxtc);
2273 if (caps->rdma) {
2274 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2275 caps->rdma = 0;
2276 }
2277
2278 /* print message only when processing device capabilities
2279 * during initialization.
2280 */
2281 if (caps == &hw->dev_caps.common_cap)
2282 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
2283 }
2284 }
2285
2286 /**
2287 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2288 * @hw: pointer to the HW struct
2289 * @func_p: pointer to function capabilities structure
2290 * @cap: pointer to the capability element to parse
2291 *
2292 * Extract function capabilities for ICE_AQC_CAPS_VF.
2293 */
2294 static void
2295 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2296 struct ice_aqc_list_caps_elem *cap)
2297 {
2298 u32 logical_id = le32_to_cpu(cap->logical_id);
2299 u32 number = le32_to_cpu(cap->number);
2300
2301 func_p->num_allocd_vfs = number;
2302 func_p->vf_base_id = logical_id;
2303 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2304 func_p->num_allocd_vfs);
2305 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2306 func_p->vf_base_id);
2307 }
2308
2309 /**
2310 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2311 * @hw: pointer to the HW struct
2312 * @func_p: pointer to function capabilities structure
2313 * @cap: pointer to the capability element to parse
2314 *
2315 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2316 */
2317 static void
2318 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2319 struct ice_aqc_list_caps_elem *cap)
2320 {
2321 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2322 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2323 le32_to_cpu(cap->number));
2324 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2325 func_p->guar_num_vsi);
2326 }
2327
2328 /**
2329 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
2330 * @hw: pointer to the HW struct
2331 * @func_p: pointer to function capabilities structure
2332 * @cap: pointer to the capability element to parse
2333 *
2334 * Extract function capabilities for ICE_AQC_CAPS_1588.
2335 */
2336 static void
2337 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2338 struct ice_aqc_list_caps_elem *cap)
2339 {
2340 struct ice_ts_func_info *info = &func_p->ts_func_info;
2341 u32 number = le32_to_cpu(cap->number);
2342
2343 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2344 func_p->common_cap.ieee_1588 = info->ena;
2345
2346 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2347 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2348 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2349 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2350
2351 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
2352 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2353
2354 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
2355 info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
2356 } else {
2357 /* Unknown clock frequency, so assume a (probably incorrect)
2358 		 * default to avoid out-of-bounds lookups of frequency-related
2359 		 * information.
2360 */
2361 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
2362 info->clk_freq);
2363 info->time_ref = ICE_TIME_REF_FREQ_25_000;
2364 }
2365
2366 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2367 func_p->common_cap.ieee_1588);
2368 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2369 info->src_tmr_owned);
2370 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2371 info->tmr_ena);
2372 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2373 info->tmr_index_owned);
2374 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2375 info->tmr_index_assoc);
2376 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2377 info->clk_freq);
2378 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2379 info->clk_src);
2380 }
2381
2382 /**
2383 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2384 * @hw: pointer to the HW struct
2385 * @func_p: pointer to function capabilities structure
2386 *
2387 * Extract function capabilities for ICE_AQC_CAPS_FD.
2388 */
2389 static void
2390 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2391 {
2392 u32 reg_val, val;
2393
2394 reg_val = rd32(hw, GLQF_FD_SIZE);
2395 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2396 GLQF_FD_SIZE_FD_GSIZE_S;
2397 func_p->fd_fltr_guar =
2398 ice_get_num_per_func(hw, val);
2399 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2400 GLQF_FD_SIZE_FD_BSIZE_S;
2401 func_p->fd_fltr_best_effort = val;
2402
2403 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2404 func_p->fd_fltr_guar);
2405 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2406 func_p->fd_fltr_best_effort);
2407 }
2408
2409 /**
2410 * ice_parse_func_caps - Parse function capabilities
2411 * @hw: pointer to the HW struct
2412 * @func_p: pointer to function capabilities structure
2413 * @buf: buffer containing the function capability records
2414 * @cap_count: the number of capabilities
2415 *
2416 * Helper function to parse function (0x000A) capabilities list. For
2417 * capabilities shared between device and function, this relies on
2418 * ice_parse_common_caps.
2419 *
2420 * Loop through the list of provided capabilities and extract the relevant
2421 * data into the function capabilities structure.
2422 */
2423 static void
2424 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2425 void *buf, u32 cap_count)
2426 {
2427 struct ice_aqc_list_caps_elem *cap_resp;
2428 u32 i;
2429
2430 cap_resp = buf;
2431
2432 memset(func_p, 0, sizeof(*func_p));
2433
2434 for (i = 0; i < cap_count; i++) {
2435 u16 cap = le16_to_cpu(cap_resp[i].cap);
2436 bool found;
2437
2438 found = ice_parse_common_caps(hw, &func_p->common_cap,
2439 &cap_resp[i], "func caps");
2440
2441 switch (cap) {
2442 case ICE_AQC_CAPS_VF:
2443 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2444 break;
2445 case ICE_AQC_CAPS_VSI:
2446 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2447 break;
2448 case ICE_AQC_CAPS_1588:
2449 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2450 break;
2451 case ICE_AQC_CAPS_FD:
2452 ice_parse_fdir_func_caps(hw, func_p);
2453 break;
2454 default:
2455 /* Don't list common capabilities as unknown */
2456 if (!found)
2457 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2458 i, cap);
2459 break;
2460 }
2461 }
2462
2463 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2464 }
2465
2466 /**
2467 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2468 * @hw: pointer to the HW struct
2469 * @dev_p: pointer to device capabilities structure
2470 * @cap: capability element to parse
2471 *
2472 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2473 */
2474 static void
2475 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2476 struct ice_aqc_list_caps_elem *cap)
2477 {
2478 u32 number = le32_to_cpu(cap->number);
2479
2480 dev_p->num_funcs = hweight32(number);
2481 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2482 dev_p->num_funcs);
2483 }
2484
2485 /**
2486 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2487 * @hw: pointer to the HW struct
2488 * @dev_p: pointer to device capabilities structure
2489 * @cap: capability element to parse
2490 *
2491 * Parse ICE_AQC_CAPS_VF for device capabilities.
2492 */
2493 static void
2494 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2495 struct ice_aqc_list_caps_elem *cap)
2496 {
2497 u32 number = le32_to_cpu(cap->number);
2498
2499 dev_p->num_vfs_exposed = number;
2500 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2501 dev_p->num_vfs_exposed);
2502 }
2503
2504 /**
2505 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2506 * @hw: pointer to the HW struct
2507 * @dev_p: pointer to device capabilities structure
2508 * @cap: capability element to parse
2509 *
2510 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2511 */
2512 static void
2513 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2514 struct ice_aqc_list_caps_elem *cap)
2515 {
2516 u32 number = le32_to_cpu(cap->number);
2517
2518 dev_p->num_vsi_allocd_to_host = number;
2519 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2520 dev_p->num_vsi_allocd_to_host);
2521 }
2522
2523 /**
2524 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
2525 * @hw: pointer to the HW struct
2526 * @dev_p: pointer to device capabilities structure
2527 * @cap: capability element to parse
2528 *
2529 * Parse ICE_AQC_CAPS_1588 for device capabilities.
2530 */
2531 static void
2532 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2533 struct ice_aqc_list_caps_elem *cap)
2534 {
2535 struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2536 u32 logical_id = le32_to_cpu(cap->logical_id);
2537 u32 phys_id = le32_to_cpu(cap->phys_id);
2538 u32 number = le32_to_cpu(cap->number);
2539
2540 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2541 dev_p->common_cap.ieee_1588 = info->ena;
2542
2543 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2544 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2545 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2546
2547 info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
2548 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2549 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2550
2551 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
2552
2553 info->ena_ports = logical_id;
2554 info->tmr_own_map = phys_id;
2555
2556 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2557 dev_p->common_cap.ieee_1588);
2558 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2559 info->tmr0_owner);
2560 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2561 info->tmr0_owned);
2562 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2563 info->tmr0_ena);
2564 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2565 info->tmr1_owner);
2566 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2567 info->tmr1_owned);
2568 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2569 info->tmr1_ena);
2570 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
2571 info->ts_ll_read);
2572 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2573 info->ena_ports);
2574 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2575 info->tmr_own_map);
2576 }
2577
2578 /**
2579 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2580 * @hw: pointer to the HW struct
2581 * @dev_p: pointer to device capabilities structure
2582 * @cap: capability element to parse
2583 *
2584 * Parse ICE_AQC_CAPS_FD for device capabilities.
2585 */
2586 static void
2587 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2588 struct ice_aqc_list_caps_elem *cap)
2589 {
2590 u32 number = le32_to_cpu(cap->number);
2591
2592 dev_p->num_flow_director_fltr = number;
2593 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2594 dev_p->num_flow_director_fltr);
2595 }
2596
2597 /**
2598 * ice_parse_dev_caps - Parse device capabilities
2599 * @hw: pointer to the HW struct
2600 * @dev_p: pointer to device capabilities structure
2601 * @buf: buffer containing the device capability records
2602 * @cap_count: the number of capabilities
2603 *
2604 * Helper function to parse device (0x000B) capabilities list. For
2605 * capabilities shared between device and function, this relies on
2606 * ice_parse_common_caps.
2607 *
2608 * Loop through the list of provided capabilities and extract the relevant
2609 * data into the device capabilities structure.
2610 */
2611 static void
2612 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2613 void *buf, u32 cap_count)
2614 {
2615 struct ice_aqc_list_caps_elem *cap_resp;
2616 u32 i;
2617
2618 cap_resp = buf;
2619
2620 memset(dev_p, 0, sizeof(*dev_p));
2621
2622 for (i = 0; i < cap_count; i++) {
2623 u16 cap = le16_to_cpu(cap_resp[i].cap);
2624 bool found;
2625
2626 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2627 &cap_resp[i], "dev caps");
2628
2629 switch (cap) {
2630 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2631 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2632 break;
2633 case ICE_AQC_CAPS_VF:
2634 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2635 break;
2636 case ICE_AQC_CAPS_VSI:
2637 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2638 break;
2639 case ICE_AQC_CAPS_1588:
2640 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2641 break;
2642 case ICE_AQC_CAPS_FD:
2643 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2644 break;
2645 default:
2646 /* Don't list common capabilities as unknown */
2647 if (!found)
2648 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2649 i, cap);
2650 break;
2651 }
2652 }
2653
2654 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2655 }
2656
2657 /**
2658 * ice_aq_get_netlist_node
2659 * @hw: pointer to the hw struct
2660 * @cmd: get_link_topo AQ structure
2661 * @node_part_number: output node part number if node found
2662 * @node_handle: output node handle parameter if node found
2663 */
2664 static int
2665 ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
2666 u8 *node_part_number, u16 *node_handle)
2667 {
2668 struct ice_aq_desc desc;
2669
2670 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
2671 desc.params.get_link_topo = *cmd;
2672
2673 if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
2674 return -EIO;
2675
2676 if (node_handle)
2677 *node_handle = le16_to_cpu(desc.params.get_link_topo.addr.handle);
2678 if (node_part_number)
2679 *node_part_number = desc.params.get_link_topo.node_part_num;
2680
2681 return 0;
2682 }
2683
2684 /**
2685 * ice_is_pf_c827 - check if the PF contains a C827 PHY
2686 * @hw: pointer to the hw struct
2687 */
2688 bool ice_is_pf_c827(struct ice_hw *hw)
2689 {
2690 struct ice_aqc_get_link_topo cmd = {};
2691 u8 node_part_number;
2692 u16 node_handle;
2693 int status;
2694
2695 if (hw->mac_type != ICE_MAC_E810)
2696 return false;
2697
2698 if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
2699 return true;
2700
2701 cmd.addr.topo_params.node_type_ctx =
2702 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
2703 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
2704 cmd.addr.topo_params.index = 0;
2705
2706 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
2707 &node_handle);
2708
2709 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
2710 return false;
2711
2712 if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
2713 return true;
2714
2715 return false;
2716 }
2717
2718 /**
2719 * ice_aq_list_caps - query function/device capabilities
2720 * @hw: pointer to the HW struct
2721 * @buf: a buffer to hold the capabilities
2722 * @buf_size: size of the buffer
2723 * @cap_count: if not NULL, set to the number of capabilities reported
2724 * @opc: capabilities type to discover, device or function
2725 * @cd: pointer to command details structure or NULL
2726 *
2727 * Get the function (0x000A) or device (0x000B) capabilities description from
2728 * firmware and store it in the buffer.
2729 *
2730 * If the cap_count pointer is not NULL, then it is set to the number of
2731 * capabilities firmware will report. Note that if the buffer size is too
2732 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2733 * cap_count will still be updated in this case. It is recommended that the
2734 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2735 * firmware could return) to avoid this.
2736 */
2737 int
2738 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2739 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2740 {
2741 struct ice_aqc_list_caps *cmd;
2742 struct ice_aq_desc desc;
2743 int status;
2744
2745 cmd = &desc.params.get_cap;
2746
2747 if (opc != ice_aqc_opc_list_func_caps &&
2748 opc != ice_aqc_opc_list_dev_caps)
2749 return -EINVAL;
2750
2751 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2752 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2753
2754 if (cap_count)
2755 *cap_count = le32_to_cpu(cmd->count);
2756
2757 return status;
2758 }
2759
2760 /**
2761 * ice_discover_dev_caps - Read and extract device capabilities
2762 * @hw: pointer to the hardware structure
2763 * @dev_caps: pointer to device capabilities structure
2764 *
2765 * Read the device capabilities and extract them into the dev_caps structure
2766 * for later use.
2767 */
2768 int
2769 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2770 {
2771 u32 cap_count = 0;
2772 void *cbuf;
2773 int status;
2774
2775 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2776 if (!cbuf)
2777 return -ENOMEM;
2778
2779 /* Although the driver doesn't know the number of capabilities the
2780 * device will return, we can simply send a 4KB buffer, the maximum
2781 * possible size that firmware can return.
2782 */
2783 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2784
2785 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2786 ice_aqc_opc_list_dev_caps, NULL);
2787 if (!status)
2788 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2789 kfree(cbuf);
2790
2791 return status;
2792 }
2793
2794 /**
2795 * ice_discover_func_caps - Read and extract function capabilities
2796 * @hw: pointer to the hardware structure
2797 * @func_caps: pointer to function capabilities structure
2798 *
2799 * Read the function capabilities and extract them into the func_caps structure
2800 * for later use.
2801 */
2802 static int
2803 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2804 {
2805 u32 cap_count = 0;
2806 void *cbuf;
2807 int status;
2808
2809 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2810 if (!cbuf)
2811 return -ENOMEM;
2812
2813 /* Although the driver doesn't know the number of capabilities the
2814 * device will return, we can simply send a 4KB buffer, the maximum
2815 * possible size that firmware can return.
2816 */
2817 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2818
2819 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2820 ice_aqc_opc_list_func_caps, NULL);
2821 if (!status)
2822 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2823 kfree(cbuf);
2824
2825 return status;
2826 }
2827
2828 /**
2829 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2830 * @hw: pointer to the hardware structure
2831 */
2832 void ice_set_safe_mode_caps(struct ice_hw *hw)
2833 {
2834 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2835 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2836 struct ice_hw_common_caps cached_caps;
2837 u32 num_funcs;
2838
2839 /* cache some func_caps values that should be restored after memset */
2840 cached_caps = func_caps->common_cap;
2841
2842 /* unset func capabilities */
2843 memset(func_caps, 0, sizeof(*func_caps));
2844
2845 #define ICE_RESTORE_FUNC_CAP(name) \
2846 func_caps->common_cap.name = cached_caps.name
2847
2848 /* restore cached values */
2849 ICE_RESTORE_FUNC_CAP(valid_functions);
2850 ICE_RESTORE_FUNC_CAP(txq_first_id);
2851 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2852 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2853 ICE_RESTORE_FUNC_CAP(max_mtu);
2854 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2855 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2856 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2857 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
2858
2859 /* one Tx and one Rx queue in safe mode */
2860 func_caps->common_cap.num_rxq = 1;
2861 func_caps->common_cap.num_txq = 1;
2862
2863 /* two MSIX vectors, one for traffic and one for misc causes */
2864 func_caps->common_cap.num_msix_vectors = 2;
2865 func_caps->guar_num_vsi = 1;
2866
2867 /* cache some dev_caps values that should be restored after memset */
2868 cached_caps = dev_caps->common_cap;
2869 num_funcs = dev_caps->num_funcs;
2870
2871 /* unset dev capabilities */
2872 memset(dev_caps, 0, sizeof(*dev_caps));
2873
2874 #define ICE_RESTORE_DEV_CAP(name) \
2875 dev_caps->common_cap.name = cached_caps.name
2876
2877 /* restore cached values */
2878 ICE_RESTORE_DEV_CAP(valid_functions);
2879 ICE_RESTORE_DEV_CAP(txq_first_id);
2880 ICE_RESTORE_DEV_CAP(rxq_first_id);
2881 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2882 ICE_RESTORE_DEV_CAP(max_mtu);
2883 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2884 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2885 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2886 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
2887 dev_caps->num_funcs = num_funcs;
2888
2889 /* one Tx and one Rx queue per function in safe mode */
2890 dev_caps->common_cap.num_rxq = num_funcs;
2891 dev_caps->common_cap.num_txq = num_funcs;
2892
2893 /* two MSIX vectors per function */
2894 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2895 }
2896
2897 /**
2898 * ice_get_caps - get info about the HW
2899 * @hw: pointer to the hardware structure
2900 */
2901 int ice_get_caps(struct ice_hw *hw)
2902 {
2903 int status;
2904
2905 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2906 if (status)
2907 return status;
2908
2909 return ice_discover_func_caps(hw, &hw->func_caps);
2910 }
2911
2912 /**
2913 * ice_aq_manage_mac_write - manage MAC address write command
2914 * @hw: pointer to the HW struct
2915 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2916 * @flags: flags to control write behavior
2917 * @cd: pointer to command details structure or NULL
2918 *
2919 * This function is used to write MAC address to the NVM (0x0108).
2920 */
2921 int
2922 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2923 struct ice_sq_cd *cd)
2924 {
2925 struct ice_aqc_manage_mac_write *cmd;
2926 struct ice_aq_desc desc;
2927
2928 cmd = &desc.params.mac_write;
2929 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2930
2931 cmd->flags = flags;
2932 ether_addr_copy(cmd->mac_addr, mac_addr);
2933
2934 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2935 }
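
/* Illustrative sketch (not part of the original file): persisting a locally
 * administered address so it survives reset. The flag name
 * ICE_AQC_MAN_MAC_UPDATE_LAA_WOL is assumed from ice_adminq_cmd.h.
 *
 *	status = ice_aq_manage_mac_write(hw, netdev->dev_addr,
 *					 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL, NULL);
 */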
2936
2937 /**
2938 * ice_aq_clear_pxe_mode
2939 * @hw: pointer to the HW struct
2940 *
2941 * Tell the firmware that the driver is taking over from PXE (0x0110).
2942 */
2943 static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
2944 {
2945 struct ice_aq_desc desc;
2946
2947 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2948 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2949
2950 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2951 }
2952
2953 /**
2954 * ice_clear_pxe_mode - clear pxe operations mode
2955 * @hw: pointer to the HW struct
2956 *
2957 * Make sure all PXE mode settings are cleared, including things
2958 * like descriptor fetch/write-back mode.
2959 */
2960 void ice_clear_pxe_mode(struct ice_hw *hw)
2961 {
2962 if (ice_check_sq_alive(hw, &hw->adminq))
2963 ice_aq_clear_pxe_mode(hw);
2964 }
2965
2966 /**
2967 * ice_aq_set_port_params - set physical port parameters.
2968 * @pi: pointer to the port info struct
2969 * @double_vlan: if set double VLAN is enabled
2970 * @cd: pointer to command details structure or NULL
2971 *
2972 * Set Physical port parameters (0x0203)
2973 */
2974 int
2975 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
2976 struct ice_sq_cd *cd)
2978 {
2979 struct ice_aqc_set_port_params *cmd;
2980 struct ice_hw *hw = pi->hw;
2981 struct ice_aq_desc desc;
2982 u16 cmd_flags = 0;
2983
2984 cmd = &desc.params.set_port_params;
2985
2986 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2987 if (double_vlan)
2988 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2989 cmd->cmd_flags = cpu_to_le16(cmd_flags);
2990
2991 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2992 }
2993
2994 /**
2995 * ice_is_100m_speed_supported
2996 * @hw: pointer to the HW struct
2997 *
2998 * returns true if 100M speeds are supported by the device,
2999 * false otherwise.
3000 */
3001 bool ice_is_100m_speed_supported(struct ice_hw *hw)
3002 {
3003 switch (hw->device_id) {
3004 case ICE_DEV_ID_E822C_SGMII:
3005 case ICE_DEV_ID_E822L_SGMII:
3006 case ICE_DEV_ID_E823L_1GBE:
3007 case ICE_DEV_ID_E823C_SGMII:
3008 return true;
3009 default:
3010 return false;
3011 }
3012 }
3013
3014 /**
3015 * ice_get_link_speed_based_on_phy_type - returns link speed
3016 * @phy_type_low: lower part of phy_type
3017 * @phy_type_high: higher part of phy_type
3018 *
3019 * This helper function will convert an entry in PHY type structure
3020 * [phy_type_low, phy_type_high] to its corresponding link speed.
3021 * Note: In the [phy_type_low, phy_type_high] structure, exactly one bit
3022 * should be set, as this function converts a single PHY type to its
3023 * speed.
3024 * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
3025 * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will also be returned.
3026 */
3027 static u16
3028 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
3029 {
3030 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3031 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3032
3033 switch (phy_type_low) {
3034 case ICE_PHY_TYPE_LOW_100BASE_TX:
3035 case ICE_PHY_TYPE_LOW_100M_SGMII:
3036 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
3037 break;
3038 case ICE_PHY_TYPE_LOW_1000BASE_T:
3039 case ICE_PHY_TYPE_LOW_1000BASE_SX:
3040 case ICE_PHY_TYPE_LOW_1000BASE_LX:
3041 case ICE_PHY_TYPE_LOW_1000BASE_KX:
3042 case ICE_PHY_TYPE_LOW_1G_SGMII:
3043 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
3044 break;
3045 case ICE_PHY_TYPE_LOW_2500BASE_T:
3046 case ICE_PHY_TYPE_LOW_2500BASE_X:
3047 case ICE_PHY_TYPE_LOW_2500BASE_KX:
3048 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
3049 break;
3050 case ICE_PHY_TYPE_LOW_5GBASE_T:
3051 case ICE_PHY_TYPE_LOW_5GBASE_KR:
3052 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
3053 break;
3054 case ICE_PHY_TYPE_LOW_10GBASE_T:
3055 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
3056 case ICE_PHY_TYPE_LOW_10GBASE_SR:
3057 case ICE_PHY_TYPE_LOW_10GBASE_LR:
3058 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
3059 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
3060 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
3061 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
3062 break;
3063 case ICE_PHY_TYPE_LOW_25GBASE_T:
3064 case ICE_PHY_TYPE_LOW_25GBASE_CR:
3065 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
3066 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
3067 case ICE_PHY_TYPE_LOW_25GBASE_SR:
3068 case ICE_PHY_TYPE_LOW_25GBASE_LR:
3069 case ICE_PHY_TYPE_LOW_25GBASE_KR:
3070 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
3071 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
3072 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
3073 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
3074 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
3075 break;
3076 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
3077 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
3078 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
3079 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
3080 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
3081 case ICE_PHY_TYPE_LOW_40G_XLAUI:
3082 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
3083 break;
3084 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
3085 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
3086 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
3087 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
3088 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
3089 case ICE_PHY_TYPE_LOW_50G_LAUI2:
3090 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
3091 case ICE_PHY_TYPE_LOW_50G_AUI2:
3092 case ICE_PHY_TYPE_LOW_50GBASE_CP:
3093 case ICE_PHY_TYPE_LOW_50GBASE_SR:
3094 case ICE_PHY_TYPE_LOW_50GBASE_FR:
3095 case ICE_PHY_TYPE_LOW_50GBASE_LR:
3096 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3097 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3098 case ICE_PHY_TYPE_LOW_50G_AUI1:
3099 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3100 break;
3101 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3102 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3103 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3104 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3105 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3106 case ICE_PHY_TYPE_LOW_100G_CAUI4:
3107 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3108 case ICE_PHY_TYPE_LOW_100G_AUI4:
3109 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3110 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3111 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3112 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3113 case ICE_PHY_TYPE_LOW_100GBASE_DR:
3114 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3115 break;
3116 default:
3117 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3118 break;
3119 }
3120
3121 switch (phy_type_high) {
3122 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3123 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3124 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3125 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3126 case ICE_PHY_TYPE_HIGH_100G_AUI2:
3127 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3128 break;
3129 default:
3130 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3131 break;
3132 }
3133
3134 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3135 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3136 return ICE_AQ_LINK_SPEED_UNKNOWN;
3137 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3138 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3139 return ICE_AQ_LINK_SPEED_UNKNOWN;
3140 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3141 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3142 return speed_phy_type_low;
3143 else
3144 return speed_phy_type_high;
3145 }
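
/* Illustrative sketch (not part of the original file): exactly one set bit
 * resolves to a speed, anything else collapses to unknown.
 *
 *	ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_SR, 0);
 *		// -> ICE_AQ_LINK_SPEED_25GB
 *	ice_get_link_speed_based_on_phy_type(0, 0);
 *		// -> ICE_AQ_LINK_SPEED_UNKNOWN (no bit set)
 *	ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_10GBASE_T,
 *					     ICE_PHY_TYPE_HIGH_100G_CAUI2);
 *		// -> ICE_AQ_LINK_SPEED_UNKNOWN (more than one bit set)
 */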
3146
3147 /**
3148 * ice_update_phy_type
3149 * @phy_type_low: pointer to the lower part of phy_type
3150 * @phy_type_high: pointer to the higher part of phy_type
3151 * @link_speeds_bitmap: targeted link speeds bitmap
3152 *
3153 * Note: For the format of link_speeds_bitmap, see the link_speed field of
3154 * struct ice_aqc_get_link_status. The caller can pass in a
3155 * link_speeds_bitmap that includes multiple speeds.
3156 *
3157 * Each bit in the [phy_type_low, phy_type_high] structure represents a
3158 * certain link speed. This helper function turns on bits in the
3159 * [phy_type_low, phy_type_high] structure that correspond to the speeds
3160 * in the link_speeds_bitmap input parameter.
3161 */
3162 void
3163 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3164 u16 link_speeds_bitmap)
3165 {
3166 u64 pt_high;
3167 u64 pt_low;
3168 int index;
3169 u16 speed;
3170
3171 	/* We first check the low part of phy_type */
3172 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3173 pt_low = BIT_ULL(index);
3174 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3175
3176 if (link_speeds_bitmap & speed)
3177 *phy_type_low |= BIT_ULL(index);
3178 }
3179
3180 	/* We then check the high part of phy_type */
3181 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3182 pt_high = BIT_ULL(index);
3183 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3184
3185 if (link_speeds_bitmap & speed)
3186 *phy_type_high |= BIT_ULL(index);
3187 }
3188 }
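
/* Illustrative sketch (not part of the original file): enabling every PHY
 * type that links at 10G or 25G.
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	// phy_low now has all of the 10G and 25G ICE_PHY_TYPE_LOW_* bits set
 */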
3189
3190 /**
3191 * ice_aq_set_phy_cfg
3192 * @hw: pointer to the HW struct
3193 * @pi: port info structure of the interested logical port
3194 * @cfg: structure with PHY configuration data to be set
3195 * @cd: pointer to command details structure or NULL
3196 *
3197 * Set the various PHY configuration parameters supported on the Port.
3198 * One or more of the Set PHY config parameters may be ignored in an MFP
3199 * mode as the PF may not have the privilege to set some of the PHY Config
3200 * parameters. This status will be indicated by the command response (0x0601).
3201 */
3202 int
3203 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3204 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3205 {
3206 struct ice_aq_desc desc;
3207 int status;
3208
3209 if (!cfg)
3210 return -EINVAL;
3211
3212 /* Ensure that only valid bits of cfg->caps can be turned on. */
3213 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
3214 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
3215 cfg->caps);
3216
3217 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3218 }
3219
3220 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
3221 desc.params.set_phy.lport_num = pi->lport;
3222 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3223
3224 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3225 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
3226 (unsigned long long)le64_to_cpu(cfg->phy_type_low));
3227 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
3228 (unsigned long long)le64_to_cpu(cfg->phy_type_high));
3229 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
3230 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
3231 cfg->low_power_ctrl_an);
3232 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
3233 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
3234 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
3235 cfg->link_fec_opt);
3236
3237 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3238 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3239 status = 0;
3240
3241 if (!status)
3242 pi->phy.curr_user_phy_cfg = *cfg;
3243
3244 return status;
3245 }
3246
3247 /**
3248 * ice_update_link_info - update status of the HW network link
3249 * @pi: port info structure of the interested logical port
3250 */
3251 int ice_update_link_info(struct ice_port_info *pi)
3252 {
3253 struct ice_link_status *li;
3254 int status;
3255
3256 if (!pi)
3257 return -EINVAL;
3258
3259 li = &pi->phy.link_info;
3260
3261 status = ice_aq_get_link_info(pi, true, NULL, NULL);
3262 if (status)
3263 return status;
3264
3265 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3266 struct ice_aqc_get_phy_caps_data *pcaps;
3267 struct ice_hw *hw;
3268
3269 hw = pi->hw;
3270 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
3271 GFP_KERNEL);
3272 if (!pcaps)
3273 return -ENOMEM;
3274
3275 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3276 pcaps, NULL);
3277
3278 devm_kfree(ice_hw_to_dev(hw), pcaps);
3279 }
3280
3281 return status;
3282 }
3283
3284 /**
3285 * ice_cache_phy_user_req
3286 * @pi: port information structure
3287 * @cache_data: PHY logging data
3288 * @cache_mode: PHY logging mode
3289 *
3290 * Log the user request on (FC, FEC, SPEED) for later use.
3291 */
3292 static void
3293 ice_cache_phy_user_req(struct ice_port_info *pi,
3294 struct ice_phy_cache_mode_data cache_data,
3295 enum ice_phy_cache_mode cache_mode)
3296 {
3297 if (!pi)
3298 return;
3299
3300 switch (cache_mode) {
3301 case ICE_FC_MODE:
3302 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3303 break;
3304 case ICE_SPEED_MODE:
3305 pi->phy.curr_user_speed_req =
3306 cache_data.data.curr_user_speed_req;
3307 break;
3308 case ICE_FEC_MODE:
3309 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3310 break;
3311 default:
3312 break;
3313 }
3314 }
3315
3316 /**
3317 * ice_caps_to_fc_mode
3318 * @caps: PHY capabilities
3319 *
3320 * Convert PHY FC capabilities to ice FC mode
3321 */
3322 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3323 {
3324 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3325 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3326 return ICE_FC_FULL;
3327
3328 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3329 return ICE_FC_TX_PAUSE;
3330
3331 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3332 return ICE_FC_RX_PAUSE;
3333
3334 return ICE_FC_NONE;
3335 }
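
/* Example (illustrative): a PHY that advertises pause in both directions
 * maps to full flow control.
 *
 *	u8 caps = ICE_AQC_PHY_EN_TX_LINK_PAUSE | ICE_AQC_PHY_EN_RX_LINK_PAUSE;
 *
 *	ice_caps_to_fc_mode(caps) returns ICE_FC_FULL.
 */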
3336
3337 /**
3338 * ice_caps_to_fec_mode
3339 * @caps: PHY capabilities
3340 * @fec_options: Link FEC options
3341 *
3342 * Convert PHY FEC capabilities to ice FEC mode
3343 */
3344 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3345 {
3346 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3347 return ICE_FEC_AUTO;
3348
3349 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3350 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3351 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3352 ICE_AQC_PHY_FEC_25G_KR_REQ))
3353 return ICE_FEC_BASER;
3354
3355 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3356 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3357 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3358 return ICE_FEC_RS;
3359
3360 return ICE_FEC_NONE;
3361 }
3362
3363 /**
3364 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3365 * @pi: port information structure
3366 * @cfg: PHY configuration data to set FC mode
3367 * @req_mode: FC mode to configure
3368 */
3369 int
3370 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3371 enum ice_fc_mode req_mode)
3372 {
3373 struct ice_phy_cache_mode_data cache_data;
3374 u8 pause_mask = 0x0;
3375
3376 if (!pi || !cfg)
3377 return -EINVAL;
3378
3379 switch (req_mode) {
3380 case ICE_FC_FULL:
3381 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3382 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3383 break;
3384 case ICE_FC_RX_PAUSE:
3385 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3386 break;
3387 case ICE_FC_TX_PAUSE:
3388 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3389 break;
3390 default:
3391 break;
3392 }
3393
3394 /* clear the old pause settings */
3395 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3396 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3397
3398 /* set the new capabilities */
3399 cfg->caps |= pause_mask;
3400
3401 /* Cache user FC request */
3402 cache_data.data.curr_user_fc_req = req_mode;
3403 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3404
3405 return 0;
3406 }
3407
3408 /**
3409 * ice_set_fc
3410 * @pi: port information structure
3411 * @aq_failures: pointer to status code, specific to ice_set_fc routine
3412 * @ena_auto_link_update: enable automatic link update
3413 *
3414 * Set the requested flow control mode.
3415 */
3416 int
3417 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3418 {
3419 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3420 struct ice_aqc_get_phy_caps_data *pcaps;
3421 struct ice_hw *hw;
3422 int status;
3423
3424 if (!pi || !aq_failures)
3425 return -EINVAL;
3426
3427 *aq_failures = 0;
3428 hw = pi->hw;
3429
3430 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
3431 if (!pcaps)
3432 return -ENOMEM;
3433
3434 /* Get the current PHY config */
3435 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3436 pcaps, NULL);
3437 if (status) {
3438 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3439 goto out;
3440 }
3441
3442 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3443
3444 /* Configure the set PHY data */
3445 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3446 if (status)
3447 goto out;
3448
3449 /* If the capabilities have changed, then set the new config */
3450 if (cfg.caps != pcaps->caps) {
3451 int retry_count, retry_max = 10;
3452
3453 /* Auto restart link so settings take effect */
3454 if (ena_auto_link_update)
3455 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3456
3457 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3458 if (status) {
3459 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3460 goto out;
3461 }
3462
3463 /* Update the link info.
3464 * It sometimes takes a really long time for the link to
3465 * come back from the atomic reset. Thus, we wait a
3466 * little bit.
3467 */
3468 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3469 status = ice_update_link_info(pi);
3470
3471 if (!status)
3472 break;
3473
3474 mdelay(100);
3475 }
3476
3477 if (status)
3478 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3479 }
3480
3481 out:
3482 devm_kfree(ice_hw_to_dev(hw), pcaps);
3483 return status;
3484 }
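
/* Usage sketch (illustrative, not compiled into the driver): request
 * symmetric flow control and let FW restart the link so it takes effect.
 *
 *	u8 aq_failures = 0;
 *	int err;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	err = ice_set_fc(pi, &aq_failures, true);
 *	if (err)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "set fc failed %d, stage %u\n",
 *			  err, aq_failures);
 */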
3485
3486 /**
3487 * ice_phy_caps_equals_cfg
3488 * @phy_caps: PHY capabilities
3489 * @phy_cfg: PHY configuration
3490 *
3491 * Helper function to determine if the PHY capabilities match the PHY
3492 * configuration
3493 */
3494 bool
3495 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3496 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3497 {
3498 u8 caps_mask, cfg_mask;
3499
3500 if (!phy_caps || !phy_cfg)
3501 return false;
3502
3503 /* These bits are not common between capabilities and configuration.
3504 * Do not use them to determine equality.
3505 */
3506 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3507 ICE_AQC_GET_PHY_EN_MOD_QUAL);
3508 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3509
3510 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3511 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3512 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3513 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3514 phy_caps->eee_cap != phy_cfg->eee_cap ||
3515 phy_caps->eeer_value != phy_cfg->eeer_value ||
3516 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3517 return false;
3518
3519 return true;
3520 }
3521
3522 /**
3523 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3524 * @pi: port information structure
3525 * @caps: PHY ability structure to copy data from
3526 * @cfg: PHY configuration structure to copy data to
3527 *
3528 * Helper function to copy AQC PHY get ability data to PHY set configuration
3529 * data structure
3530 */
3531 void
3532 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3533 struct ice_aqc_get_phy_caps_data *caps,
3534 struct ice_aqc_set_phy_cfg_data *cfg)
3535 {
3536 if (!pi || !caps || !cfg)
3537 return;
3538
3539 memset(cfg, 0, sizeof(*cfg));
3540 cfg->phy_type_low = caps->phy_type_low;
3541 cfg->phy_type_high = caps->phy_type_high;
3542 cfg->caps = caps->caps;
3543 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3544 cfg->eee_cap = caps->eee_cap;
3545 cfg->eeer_value = caps->eeer_value;
3546 cfg->link_fec_opt = caps->link_fec_options;
3547 cfg->module_compliance_enforcement =
3548 caps->module_compliance_enforcement;
3549 }
3550
3551 /**
3552 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3553 * @pi: port information structure
3554 * @cfg: PHY configuration data to set FEC mode
3555 * @fec: FEC mode to configure
3556 */
3557 int
3558 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3559 enum ice_fec_mode fec)
3560 {
3561 struct ice_aqc_get_phy_caps_data *pcaps;
3562 struct ice_hw *hw;
3563 int status;
3564
3565 if (!pi || !cfg)
3566 return -EINVAL;
3567
3568 hw = pi->hw;
3569
3570 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3571 if (!pcaps)
3572 return -ENOMEM;
3573
3574 status = ice_aq_get_phy_caps(pi, false,
3575 (ice_fw_supports_report_dflt_cfg(hw) ?
3576 ICE_AQC_REPORT_DFLT_CFG :
3577 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3578 if (status)
3579 goto out;
3580
3581 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
3582 cfg->link_fec_opt = pcaps->link_fec_options;
3583
3584 switch (fec) {
3585 case ICE_FEC_BASER:
3586 /* Clear the RS bits, AND in the BASE-R ability
3587 * bits, and OR in the request bits.
3588 */
3589 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3590 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3591 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3592 ICE_AQC_PHY_FEC_25G_KR_REQ;
3593 break;
3594 case ICE_FEC_RS:
3595 /* Clear the BASE-R bits, AND in the RS ability
3596 * bits, and OR in the request bits.
3597 */
3598 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3599 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3600 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3601 break;
3602 case ICE_FEC_NONE:
3603 /* Clear all FEC option bits. */
3604 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3605 break;
3606 case ICE_FEC_AUTO:
3607 /* AND in the auto FEC bit and all caps bits. */
3608 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3609 cfg->link_fec_opt |= pcaps->link_fec_options;
3610 break;
3611 default:
3612 status = -EINVAL;
3613 break;
3614 }
3615
3616 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
3617 !ice_fw_supports_report_dflt_cfg(hw)) {
3618 struct ice_link_default_override_tlv tlv = { 0 };
3619
3620 status = ice_get_link_default_override(&tlv, pi);
3621 if (status)
3622 goto out;
3623
3624 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3625 (tlv.options & ICE_LINK_OVERRIDE_EN))
3626 cfg->link_fec_opt = tlv.fec_options;
3627 }
3628
3629 out:
3630 kfree(pcaps);
3631
3632 return status;
3633 }
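
/* Usage sketch (illustrative, not compiled into the driver): request
 * RS-FEC on top of the cached user PHY config, then apply it.
 *
 *	struct ice_aqc_set_phy_cfg_data cfg = pi->phy.curr_user_phy_cfg;
 *	int err;
 *
 *	err = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
 *	if (!err)
 *		err = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */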
3634
3635 /**
3636 * ice_get_link_status - get status of the HW network link
3637 * @pi: port information structure
3638 * @link_up: pointer to bool (true/false = linkup/linkdown)
3639 *
3640 * Variable link_up is true if the link is up, false if it is down.
3641 * The value of link_up is invalid if the return status is non-zero. As a
3642 * result of this call, link status reporting becomes enabled.
3643 */
3644 int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3645 {
3646 struct ice_phy_info *phy_info;
3647 int status = 0;
3648
3649 if (!pi || !link_up)
3650 return -EINVAL;
3651
3652 phy_info = &pi->phy;
3653
3654 if (phy_info->get_link_info) {
3655 status = ice_update_link_info(pi);
3656
3657 if (status)
3658 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3659 status);
3660 }
3661
3662 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3663
3664 return status;
3665 }
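
/* Usage sketch (illustrative): poll the link once; link_up is only valid
 * when the call returns zero.
 *
 *	bool link_up = false;
 *	int err;
 *
 *	err = ice_get_link_status(pi, &link_up);
 *	if (!err && link_up)
 *		... report carrier on ...
 */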
3666
3667 /**
3668 * ice_aq_set_link_restart_an
3669 * @pi: pointer to the port information structure
3670 * @ena_link: if true: enable link, if false: disable link
3671 * @cd: pointer to command details structure or NULL
3672 *
3673 * Sets up the link and restarts the Auto-Negotiation over the link.
3674 */
3675 int
3676 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3677 struct ice_sq_cd *cd)
3678 {
3679 struct ice_aqc_restart_an *cmd;
3680 struct ice_aq_desc desc;
3681
3682 cmd = &desc.params.restart_an;
3683
3684 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3685
3686 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3687 cmd->lport_num = pi->lport;
3688 if (ena_link)
3689 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3690 else
3691 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3692
3693 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3694 }
3695
3696 /**
3697 * ice_aq_set_event_mask
3698 * @hw: pointer to the HW struct
3699 * @port_num: port number of the physical function
3700 * @mask: event mask to be set
3701 * @cd: pointer to command details structure or NULL
3702 *
3703 * Set event mask (0x0613)
3704 */
3705 int
3706 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3707 struct ice_sq_cd *cd)
3708 {
3709 struct ice_aqc_set_event_mask *cmd;
3710 struct ice_aq_desc desc;
3711
3712 cmd = &desc.params.set_event_mask;
3713
3714 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3715
3716 cmd->lport_num = port_num;
3717
3718 cmd->event_mask = cpu_to_le16(mask);
3719 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3720 }
3721
3722 /**
3723 * ice_aq_set_mac_loopback
3724 * @hw: pointer to the HW struct
3725 * @ena_lpbk: Enable or Disable loopback
3726 * @cd: pointer to command details structure or NULL
3727 *
3728 * Enable/disable loopback on a given port
3729 */
3730 int
3731 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3732 {
3733 struct ice_aqc_set_mac_lb *cmd;
3734 struct ice_aq_desc desc;
3735
3736 cmd = &desc.params.set_mac_lb;
3737
3738 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3739 if (ena_lpbk)
3740 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3741
3742 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3743 }
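
/* Usage sketch (illustrative): wrap a MAC loopback self-test, restoring
 * normal operation afterwards.
 *
 *	int err = ice_aq_set_mac_loopback(hw, true, NULL);
 *
 *	if (!err) {
 *		... run the loopback test ...
 *		err = ice_aq_set_mac_loopback(hw, false, NULL);
 *	}
 */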
3744
3745 /**
3746 * ice_aq_set_port_id_led
3747 * @pi: pointer to the port information
3748 * @is_orig_mode: is this LED set to original mode (by the net-list)
3749 * @cd: pointer to command details structure or NULL
3750 *
3751 * Set LED value for the given port (0x06e9)
3752 */
3753 int
3754 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3755 struct ice_sq_cd *cd)
3756 {
3757 struct ice_aqc_set_port_id_led *cmd;
3758 struct ice_hw *hw = pi->hw;
3759 struct ice_aq_desc desc;
3760
3761 cmd = &desc.params.set_port_id_led;
3762
3763 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3764
3765 if (is_orig_mode)
3766 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3767 else
3768 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3769
3770 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3771 }
3772
3773 /**
3774 * ice_aq_get_port_options
3775 * @hw: pointer to the HW struct
3776 * @options: buffer for the resultant port options
3777 * @option_count: input - size of the buffer in port options structures,
3778 * output - number of returned port options
3779 * @lport: logical port to call the command with (optional)
3780 * @lport_valid: when false, FW uses port owned by the PF instead of lport,
3781 * when PF owns more than 1 port it must be true
3782 * @active_option_idx: index of active port option in returned buffer
3783 * @active_option_valid: active option in returned buffer is valid
3784 * @pending_option_idx: index of pending port option in returned buffer
3785 * @pending_option_valid: pending option in returned buffer is valid
3786 *
3787 * Calls Get Port Options AQC (0x06ea) and verifies result.
3788 */
3789 int
3790 ice_aq_get_port_options(struct ice_hw *hw,
3791 struct ice_aqc_get_port_options_elem *options,
3792 u8 *option_count, u8 lport, bool lport_valid,
3793 u8 *active_option_idx, bool *active_option_valid,
3794 u8 *pending_option_idx, bool *pending_option_valid)
3795 {
3796 struct ice_aqc_get_port_options *cmd;
3797 struct ice_aq_desc desc;
3798 int status;
3799 u8 i;
3800
3801 /* options buffer shall be able to hold max returned options */
3802 if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
3803 return -EINVAL;
3804
3805 cmd = &desc.params.get_port_options;
3806 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
3807
3808 if (lport_valid)
3809 cmd->lport_num = lport;
3810 cmd->lport_num_valid = lport_valid;
3811
3812 status = ice_aq_send_cmd(hw, &desc, options,
3813 *option_count * sizeof(*options), NULL);
3814 if (status)
3815 return status;
3816
3817 /* verify direct FW response & set output parameters */
3818 *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
3819 cmd->port_options_count);
3820 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
3821 *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
3822 cmd->port_options);
3823 if (*active_option_valid) {
3824 *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
3825 cmd->port_options);
3826 if (*active_option_idx > (*option_count - 1))
3827 return -EIO;
3828 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
3829 *active_option_idx);
3830 }
3831
3832 *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
3833 cmd->pending_port_option_status);
3834 if (*pending_option_valid) {
3835 *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
3836 cmd->pending_port_option_status);
3837 if (*pending_option_idx > (*option_count - 1))
3838 return -EIO;
3839 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
3840 *pending_option_idx);
3841 }
3842
3843 /* mask output options fields */
3844 for (i = 0; i < *option_count; i++) {
3845 options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
3846 options[i].pmd);
3847 options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
3848 options[i].max_lane_speed);
3849 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
3850 options[i].pmd, options[i].max_lane_speed);
3851 }
3852
3853 return 0;
3854 }
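
/* Usage sketch (illustrative): the caller's buffer must be sized for the
 * maximum option count, as enforced at the top of the function above.
 *
 *	struct ice_aqc_get_port_options_elem opts[ICE_AQC_PORT_OPT_COUNT_M];
 *	u8 count = ICE_AQC_PORT_OPT_COUNT_M;
 *	u8 active_idx, pending_idx;
 *	bool active_valid, pending_valid;
 *	int err;
 *
 *	err = ice_aq_get_port_options(hw, opts, &count, 0, false,
 *				      &active_idx, &active_valid,
 *				      &pending_idx, &pending_valid);
 */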
3855
3856 /**
3857 * ice_aq_set_port_option
3858 * @hw: pointer to the HW struct
3859 * @lport: logical port to call the command with
3860 * @lport_valid: when false, FW uses port owned by the PF instead of lport,
3861 * when PF owns more than 1 port it must be true
3862 * @new_option: new port option to be written
3863 *
3864 * Calls Set Port Options AQC (0x06eb).
3865 */
3866 int
3867 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
3868 u8 new_option)
3869 {
3870 struct ice_aqc_set_port_option *cmd;
3871 struct ice_aq_desc desc;
3872
3873 if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
3874 return -EINVAL;
3875
3876 cmd = &desc.params.set_port_option;
3877 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
3878
3879 if (lport_valid)
3880 cmd->lport_num = lport;
3881
3882 cmd->lport_num_valid = lport_valid;
3883 cmd->selected_port_option = new_option;
3884
3885 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3886 }
3887
3888 /**
3889 * ice_aq_sff_eeprom
3890 * @hw: pointer to the HW struct
3891 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3892 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3893 * @mem_addr: I2C offset. Lower 8 bits for address, upper 8 bits zero padding.
3894 * @page: QSFP page
3895 * @set_page: set or ignore the page
3896 * @data: pointer to data buffer to be read/written to the I2C device.
3897 * @length: 1-16 for read, 1 for write.
3898 * @write: 0 for read, 1 for write.
3899 * @cd: pointer to command details structure or NULL
3900 *
3901 * Read/Write SFF EEPROM (0x06EE)
3902 */
3903 int
3904 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3905 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3906 bool write, struct ice_sq_cd *cd)
3907 {
3908 struct ice_aqc_sff_eeprom *cmd;
3909 struct ice_aq_desc desc;
3910 int status;
3911
3912 if (!data || (mem_addr & 0xff00))
3913 return -EINVAL;
3914
3915 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3916 cmd = &desc.params.read_write_sff_param;
3917 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
3918 cmd->lport_num = (u8)(lport & 0xff);
3919 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3920 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3921 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3922 ((set_page <<
3923 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3924 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3925 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3926 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3927 if (write)
3928 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3929
3930 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3931 return status;
3932 }
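
/* Usage sketch (illustrative): read the first 16 bytes of page 0 of a
 * module EEPROM on the PF's own port (lport valid bit clear), using the
 * typical 0xA0 bus address.
 *
 *	u8 buf[16];
 *	int err;
 *
 *	err = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x000, 0, 0, buf, sizeof(buf),
 *				false, NULL);
 */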
3933
3934 static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type)
3935 {
3936 switch (type) {
3937 case ICE_LUT_VSI:
3938 return ICE_LUT_VSI_SIZE;
3939 case ICE_LUT_GLOBAL:
3940 return ICE_LUT_GLOBAL_SIZE;
3941 case ICE_LUT_PF:
3942 return ICE_LUT_PF_SIZE;
3943 }
3944 WARN_ONCE(1, "incorrect type passed");
3945 return ICE_LUT_VSI_SIZE;
3946 }
3947
3948 static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size)
3949 {
3950 switch (size) {
3951 case ICE_LUT_VSI_SIZE:
3952 return ICE_AQC_LUT_SIZE_SMALL;
3953 case ICE_LUT_GLOBAL_SIZE:
3954 return ICE_AQC_LUT_SIZE_512;
3955 case ICE_LUT_PF_SIZE:
3956 return ICE_AQC_LUT_SIZE_2K;
3957 }
3958 WARN_ONCE(1, "incorrect size passed");
3959 return 0;
3960 }
3961
3962 /**
3963 * __ice_aq_get_set_rss_lut
3964 * @hw: pointer to the hardware structure
3965 * @params: RSS LUT parameters
3966 * @set: set true to set the table, false to get the table
3967 *
3968 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3969 */
3970 static int
3971 __ice_aq_get_set_rss_lut(struct ice_hw *hw,
3972 struct ice_aq_get_set_rss_lut_params *params, bool set)
3973 {
3974 u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0;
3975 enum ice_lut_type lut_type = params->lut_type;
3976 struct ice_aqc_get_set_rss_lut *desc_params;
3977 enum ice_aqc_lut_flags flags;
3978 enum ice_lut_size lut_size;
3979 struct ice_aq_desc desc;
3980 u8 *lut = params->lut;
3981
3982
3983 if (!lut || !ice_is_vsi_valid(hw, vsi_handle))
3984 return -EINVAL;
3985
3986 lut_size = ice_lut_type_to_size(lut_type);
3987 if (lut_size > params->lut_size)
3988 return -EINVAL;
3989 else if (set && lut_size != params->lut_size)
3990 return -EINVAL;
3991
3992 opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
3993 ice_fill_dflt_direct_cmd_desc(&desc, opcode);
3994 if (set)
3995 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3996
3997 desc_params = &desc.params.get_set_rss_lut;
3998 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3999 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
4000
4001 if (lut_type == ICE_LUT_GLOBAL)
4002 glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX,
4003 params->global_lut_id);
4004
4005 flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size);
4006 desc_params->flags = cpu_to_le16(flags);
4007
4008 return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
4009 }
4010
4011 /**
4012 * ice_aq_get_rss_lut
4013 * @hw: pointer to the hardware structure
4014 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
4015 *
4016 * get the RSS lookup table, PF or VSI type
4017 */
4018 int
4019 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
4020 {
4021 return __ice_aq_get_set_rss_lut(hw, get_params, false);
4022 }
4023
4024 /**
4025 * ice_aq_set_rss_lut
4026 * @hw: pointer to the hardware structure
4027 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
4028 *
4029 * set the RSS lookup table, PF or VSI type
4030 */
4031 int
4032 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
4033 {
4034 return __ice_aq_get_set_rss_lut(hw, set_params, true);
4035 }
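
/* Usage sketch (illustrative, assumes a valid vsi_handle): program a
 * PF-scope LUT that spreads flows across four queues. When setting, the
 * buffer size must match the LUT type exactly, as checked in
 * __ice_aq_get_set_rss_lut above.
 *
 *	u8 lut[ICE_LUT_PF_SIZE];
 *	struct ice_aq_get_set_rss_lut_params params = {
 *		.vsi_handle = vsi_handle,
 *		.lut_type = ICE_LUT_PF,
 *		.lut = lut,
 *		.lut_size = ICE_LUT_PF_SIZE,
 *	};
 *	int i, err;
 *
 *	for (i = 0; i < ICE_LUT_PF_SIZE; i++)
 *		lut[i] = i % 4;
 *	err = ice_aq_set_rss_lut(hw, &params);
 */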
4036
4037 /**
4038 * __ice_aq_get_set_rss_key
4039 * @hw: pointer to the HW struct
4040 * @vsi_id: VSI FW index
4041 * @key: pointer to key info struct
4042 * @set: set true to set the key, false to get the key
4043 *
4044 * get (0x0B04) or set (0x0B02) the RSS key per VSI
4045 */
4046 static int
4047 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
4048 struct ice_aqc_get_set_rss_keys *key, bool set)
4049 {
4050 struct ice_aqc_get_set_rss_key *desc_params;
4051 u16 key_size = sizeof(*key);
4052 struct ice_aq_desc desc;
4053
4054 if (set) {
4055 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
4056 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
4057 } else {
4058 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
4059 }
4060
4061 desc_params = &desc.params.get_set_rss_key;
4062 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
4063
4064 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
4065 }
4066
4067 /**
4068 * ice_aq_get_rss_key
4069 * @hw: pointer to the HW struct
4070 * @vsi_handle: software VSI handle
4071 * @key: pointer to key info struct
4072 *
4073 * get the RSS key per VSI
4074 */
4075 int
4076 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
4077 struct ice_aqc_get_set_rss_keys *key)
4078 {
4079 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
4080 return -EINVAL;
4081
4082 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4083 key, false);
4084 }
4085
4086 /**
4087 * ice_aq_set_rss_key
4088 * @hw: pointer to the HW struct
4089 * @vsi_handle: software VSI handle
4090 * @keys: pointer to key info struct
4091 *
4092 * set the RSS key per VSI
4093 */
4094 int
4095 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
4096 struct ice_aqc_get_set_rss_keys *keys)
4097 {
4098 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
4099 return -EINVAL;
4100
4101 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4102 keys, true);
4103 }
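
/* Usage sketch (illustrative): program a random RSS key for a VSI.
 *
 *	struct ice_aqc_get_set_rss_keys keys;
 *	int err;
 *
 *	get_random_bytes(&keys, sizeof(keys));
 *	err = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */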
4104
4105 /**
4106 * ice_aq_add_lan_txq
4107 * @hw: pointer to the hardware structure
4108 * @num_qgrps: Number of added queue groups
4109 * @qg_list: list of queue groups to be added
4110 * @buf_size: size of buffer for indirect command
4111 * @cd: pointer to command details structure or NULL
4112 *
4113 * Add Tx LAN queue (0x0C30)
4114 *
4115 * NOTE:
4116 * Prior to calling add Tx LAN queue:
4117 * Initialize the following as part of the Tx queue context:
4118 * Completion queue ID if the queue uses Completion queue, Quanta profile,
4119 * Cache profile and Packet shaper profile.
4120 *
4121 * After add Tx LAN queue AQ command is completed:
4122 * Interrupts should be associated with specific queues,
4123 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
4124 * flow.
4125 */
4126 static int
4127 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4128 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
4129 struct ice_sq_cd *cd)
4130 {
4131 struct ice_aqc_add_tx_qgrp *list;
4132 struct ice_aqc_add_txqs *cmd;
4133 struct ice_aq_desc desc;
4134 u16 i, sum_size = 0;
4135
4136 cmd = &desc.params.add_txqs;
4137
4138 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
4139
4140 if (!qg_list)
4141 return -EINVAL;
4142
4143 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4144 return -EINVAL;
4145
4146 for (i = 0, list = qg_list; i < num_qgrps; i++) {
4147 sum_size += struct_size(list, txqs, list->num_txqs);
4148 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
4149 list->num_txqs);
4150 }
4151
4152 if (buf_size != sum_size)
4153 return -EINVAL;
4154
4155 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
4156
4157 cmd->num_qgrps = num_qgrps;
4158
4159 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4160 }
4161
4162 /**
4163 * ice_aq_dis_lan_txq
4164 * @hw: pointer to the hardware structure
4165 * @num_qgrps: number of groups in the list
4166 * @qg_list: the list of groups to disable
4167 * @buf_size: the total size of the qg_list buffer in bytes
4168 * @rst_src: if called due to reset, specifies the reset source
4169 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4170 * @cd: pointer to command details structure or NULL
4171 *
4172 * Disable LAN Tx queue (0x0C31)
4173 */
4174 static int
4175 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4176 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
4177 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4178 struct ice_sq_cd *cd)
4179 {
4180 struct ice_aqc_dis_txq_item *item;
4181 struct ice_aqc_dis_txqs *cmd;
4182 struct ice_aq_desc desc;
4183 u16 i, sz = 0;
4184 int status;
4185
4186 cmd = &desc.params.dis_txqs;
4187 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
4188
4189 /* qg_list can be NULL only in VM/VF reset flow */
4190 if (!qg_list && !rst_src)
4191 return -EINVAL;
4192
4193 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4194 return -EINVAL;
4195
4196 cmd->num_entries = num_qgrps;
4197
4198 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
4199 ICE_AQC_Q_DIS_TIMEOUT_M);
4200
4201 switch (rst_src) {
4202 case ICE_VM_RESET:
4203 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
4204 cmd->vmvf_and_timeout |=
4205 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
4206 break;
4207 case ICE_VF_RESET:
4208 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
4209 /* In this case, FW expects vmvf_num to be absolute VF ID */
4210 cmd->vmvf_and_timeout |=
4211 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
4212 ICE_AQC_Q_DIS_VMVF_NUM_M);
4213 break;
4214 case ICE_NO_RESET:
4215 default:
4216 break;
4217 }
4218
4219 /* flush pipe on time out */
4220 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
4221 /* If no queue group info, we are in a reset flow. Issue the AQ */
4222 if (!qg_list)
4223 goto do_aq;
4224
4225 /* set RD bit to indicate that command buffer is provided by the driver
4226 * and it needs to be read by the firmware
4227 */
4228 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
4229
4230 for (i = 0, item = qg_list; i < num_qgrps; i++) {
4231 u16 item_size = struct_size(item, q_id, item->num_qs);
4232
4233 /* If the num of queues is even, add 2 bytes of padding */
4234 if ((item->num_qs % 2) == 0)
4235 item_size += 2;
4236
4237 sz += item_size;
4238
4239 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
4240 }
4241
4242 if (buf_size != sz)
4243 return -EINVAL;
4244
4245 do_aq:
4246 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4247 if (status) {
4248 if (!qg_list)
4249 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
4250 vmvf_num, hw->adminq.sq_last_status);
4251 else
4252 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
4253 le16_to_cpu(qg_list[0].q_id[0]),
4254 hw->adminq.sq_last_status);
4255 }
4256 return status;
4257 }
4258
4259 /**
4260 * ice_aq_cfg_lan_txq
4261 * @hw: pointer to the hardware structure
4262 * @buf: buffer for command
4263 * @buf_size: size of buffer in bytes
4264 * @num_qs: number of queues being configured
4265 * @oldport: origination lport
4266 * @newport: destination lport
4267 * @cd: pointer to command details structure or NULL
4268 *
4269 * Move/Configure LAN Tx queue (0x0C32)
4270 *
4271 * There is a better AQ command to use for moving nodes, so only coding
4272 * this one for configuring the node.
4273 */
4274 int
4275 ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
4276 u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
4277 struct ice_sq_cd *cd)
4278 {
4279 struct ice_aqc_cfg_txqs *cmd;
4280 struct ice_aq_desc desc;
4281 int status;
4282
4283 cmd = &desc.params.cfg_txqs;
4284 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
4285 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
4286
4287 if (!buf)
4288 return -EINVAL;
4289
4290 cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
4291 cmd->num_qs = num_qs;
4292 cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
4293 cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) &
4294 ICE_AQC_Q_CFG_DST_PRT_M;
4295 cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) &
4296 ICE_AQC_Q_CFG_TIMEOUT_M;
4297 cmd->blocked_cgds = 0;
4298
4299 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4300 if (status)
4301 ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n",
4302 hw->adminq.sq_last_status);
4303 return status;
4304 }
4305
4306 /**
4307 * ice_aq_add_rdma_qsets
4308 * @hw: pointer to the hardware structure
4309 * @num_qset_grps: Number of RDMA Qset groups
4310 * @qset_list: list of Qset groups to be added
4311 * @buf_size: size of buffer for indirect command
4312 * @cd: pointer to command details structure or NULL
4313 *
4314 * Add Tx RDMA Qsets (0x0C33)
4315 */
4316 static int
4317 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
4318 struct ice_aqc_add_rdma_qset_data *qset_list,
4319 u16 buf_size, struct ice_sq_cd *cd)
4320 {
4321 struct ice_aqc_add_rdma_qset_data *list;
4322 struct ice_aqc_add_rdma_qset *cmd;
4323 struct ice_aq_desc desc;
4324 u16 i, sum_size = 0;
4325
4326 cmd = &desc.params.add_rdma_qset;
4327
4328 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
4329
4330 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
4331 return -EINVAL;
4332
4333 for (i = 0, list = qset_list; i < num_qset_grps; i++) {
4334 u16 num_qsets = le16_to_cpu(list->num_qsets);
4335
4336 sum_size += struct_size(list, rdma_qsets, num_qsets);
4337 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
4338 num_qsets);
4339 }
4340
4341 if (buf_size != sum_size)
4342 return -EINVAL;
4343
4344 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
4345
4346 cmd->num_qset_grps = num_qset_grps;
4347
4348 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
4349 }
4350
4351 /* End of FW Admin Queue command wrappers */
4352
4353 /**
4354 * ice_write_byte - write a byte to a packed context structure
4355 * @src_ctx: the context structure to read from
4356 * @dest_ctx: the context to be written to
4357 * @ce_info: a description of the struct to be filled
4358 */
4359 static void
4360 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4361 {
4362 u8 src_byte, dest_byte, mask;
4363 u8 *from, *dest;
4364 u16 shift_width;
4365
4366 /* copy from the next struct field */
4367 from = src_ctx + ce_info->offset;
4368
4369 /* prepare the bits and mask */
4370 shift_width = ce_info->lsb % 8;
4371 mask = (u8)(BIT(ce_info->width) - 1);
4372
4373 src_byte = *from;
4374 src_byte &= mask;
4375
4376 /* shift to correct alignment */
4377 mask <<= shift_width;
4378 src_byte <<= shift_width;
4379
4380 /* get the current bits from the target bit string */
4381 dest = dest_ctx + (ce_info->lsb / 8);
4382
4383 memcpy(&dest_byte, dest, sizeof(dest_byte));
4384
4385 dest_byte &= ~mask; /* get the bits not changing */
4386 dest_byte |= src_byte; /* add in the new bits */
4387
4388 /* put it all back */
4389 memcpy(dest, &dest_byte, sizeof(dest_byte));
4390 }
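
/* Worked example (illustrative): for a field with lsb = 3 and width = 3,
 * mask = 0x07 and shift_width = 3. A source value of 0b101 becomes
 * 0b00101000 after shifting; only destination bits 5:3 are replaced,
 * while the remaining bits survive via the inverted mask.
 */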
4391
4392 /**
4393 * ice_write_word - write a word to a packed context structure
4394 * @src_ctx: the context structure to read from
4395 * @dest_ctx: the context to be written to
4396 * @ce_info: a description of the struct to be filled
4397 */
4398 static void
4399 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4400 {
4401 u16 src_word, mask;
4402 __le16 dest_word;
4403 u8 *from, *dest;
4404 u16 shift_width;
4405
4406 /* copy from the next struct field */
4407 from = src_ctx + ce_info->offset;
4408
4409 /* prepare the bits and mask */
4410 shift_width = ce_info->lsb % 8;
4411 mask = BIT(ce_info->width) - 1;
4412
4413 /* don't swizzle the bits until after the mask because the mask bits
4414 * will be in a different bit position on big endian machines
4415 */
4416 src_word = *(u16 *)from;
4417 src_word &= mask;
4418
4419 /* shift to correct alignment */
4420 mask <<= shift_width;
4421 src_word <<= shift_width;
4422
4423 /* get the current bits from the target bit string */
4424 dest = dest_ctx + (ce_info->lsb / 8);
4425
4426 memcpy(&dest_word, dest, sizeof(dest_word));
4427
4428 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
4429 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
4430
4431 /* put it all back */
4432 memcpy(dest, &dest_word, sizeof(dest_word));
4433 }
4434
4435 /**
4436 * ice_write_dword - write a dword to a packed context structure
4437 * @src_ctx: the context structure to read from
4438 * @dest_ctx: the context to be written to
4439 * @ce_info: a description of the struct to be filled
4440 */
4441 static void
4442 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4443 {
4444 u32 src_dword, mask;
4445 __le32 dest_dword;
4446 u8 *from, *dest;
4447 u16 shift_width;
4448
4449 /* copy from the next struct field */
4450 from = src_ctx + ce_info->offset;
4451
4452 /* prepare the bits and mask */
4453 shift_width = ce_info->lsb % 8;
4454
4455 /* if the field width is exactly 32 on an x86 machine, then the shift
4456 * operation will not work because the SHL instruction's count is masked
4457 * to 5 bits so the shift will do nothing
4458 */
4459 if (ce_info->width < 32)
4460 mask = BIT(ce_info->width) - 1;
4461 else
4462 mask = (u32)~0;
4463
4464 /* don't swizzle the bits until after the mask because the mask bits
4465 * will be in a different bit position on big endian machines
4466 */
4467 src_dword = *(u32 *)from;
4468 src_dword &= mask;
4469
4470 /* shift to correct alignment */
4471 mask <<= shift_width;
4472 src_dword <<= shift_width;
4473
4474 /* get the current bits from the target bit string */
4475 dest = dest_ctx + (ce_info->lsb / 8);
4476
4477 memcpy(&dest_dword, dest, sizeof(dest_dword));
4478
4479 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
4480 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
4481
4482 /* put it all back */
4483 memcpy(dest, &dest_dword, sizeof(dest_dword));
4484 }
4485
4486 /**
4487 * ice_write_qword - write a qword to a packed context structure
4488 * @src_ctx: the context structure to read from
4489 * @dest_ctx: the context to be written to
4490 * @ce_info: a description of the struct to be filled
4491 */
4492 static void
4493 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4494 {
4495 u64 src_qword, mask;
4496 __le64 dest_qword;
4497 u8 *from, *dest;
4498 u16 shift_width;
4499
4500 /* copy from the next struct field */
4501 from = src_ctx + ce_info->offset;
4502
4503 /* prepare the bits and mask */
4504 shift_width = ce_info->lsb % 8;
4505
4506 /* if the field width is exactly 64 on an x86 machine, then the shift
4507 * operation will not work because the SHL instruction's count is masked
4508 * to 6 bits so the shift will do nothing
4509 */
4510 if (ce_info->width < 64)
4511 mask = BIT_ULL(ce_info->width) - 1;
4512 else
4513 mask = (u64)~0;
4514
4515 /* don't swizzle the bits until after the mask because the mask bits
4516 * will be in a different bit position on big endian machines
4517 */
4518 src_qword = *(u64 *)from;
4519 src_qword &= mask;
4520
4521 /* shift to correct alignment */
4522 mask <<= shift_width;
4523 src_qword <<= shift_width;
4524
4525 /* get the current bits from the target bit string */
4526 dest = dest_ctx + (ce_info->lsb / 8);
4527
4528 memcpy(&dest_qword, dest, sizeof(dest_qword));
4529
4530 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
4531 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
4532
4533 /* put it all back */
4534 memcpy(dest, &dest_qword, sizeof(dest_qword));
4535 }
4536
4537 /**
4538 * ice_set_ctx - set context bits in packed structure
4539 * @hw: pointer to the hardware structure
4540 * @src_ctx: pointer to a generic non-packed context structure
4541 * @dest_ctx: pointer to memory for the packed structure
4542 * @ce_info: a description of the structure to be transformed
4543 */
4544 int
4545 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4546 const struct ice_ctx_ele *ce_info)
4547 {
4548 int f;
4549
4550 for (f = 0; ce_info[f].width; f++) {
4551 /* We have to deal with each element of the FW response
4552 * using the correct size so that we are correct regardless
4553 * of the endianness of the machine.
4554 */
4555 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4556 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4557 f, ce_info[f].width, ce_info[f].size_of);
4558 continue;
4559 }
4560 switch (ce_info[f].size_of) {
4561 case sizeof(u8):
4562 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4563 break;
4564 case sizeof(u16):
4565 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4566 break;
4567 case sizeof(u32):
4568 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4569 break;
4570 case sizeof(u64):
4571 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4572 break;
4573 default:
4574 return -EINVAL;
4575 }
4576 }
4577
4578 return 0;
4579 }
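
/* Usage sketch (illustrative; struct my_ctx and its layout are
 * hypothetical): describe a packed context with a 16-bit field at bit 0
 * and an 8-bit field at bit 16. A zero-width entry terminates the table,
 * matching the loop condition above.
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		{ .offset = offsetof(struct my_ctx, base),
 *		  .size_of = sizeof(u16), .width = 16, .lsb = 0 },
 *		{ .offset = offsetof(struct my_ctx, prof),
 *		  .size_of = sizeof(u8), .width = 8, .lsb = 16 },
 *		{ 0 },
 *	};
 *
 *	err = ice_set_ctx(hw, (u8 *)&src, dest_buf, my_ctx_info);
 */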
4580
4581 /**
4582 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4583 * @hw: pointer to the HW struct
4584 * @vsi_handle: software VSI handle
4585 * @tc: TC number
4586 * @q_handle: software queue handle
4587 */
4588 struct ice_q_ctx *
4589 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4590 {
4591 struct ice_vsi_ctx *vsi;
4592 struct ice_q_ctx *q_ctx;
4593
4594 vsi = ice_get_vsi_ctx(hw, vsi_handle);
4595 if (!vsi)
4596 return NULL;
4597 if (q_handle >= vsi->num_lan_q_entries[tc])
4598 return NULL;
4599 if (!vsi->lan_q_ctx[tc])
4600 return NULL;
4601 q_ctx = vsi->lan_q_ctx[tc];
4602 return &q_ctx[q_handle];
4603 }
4604
4605 /**
4606 * ice_ena_vsi_txq
4607 * @pi: port information structure
4608 * @vsi_handle: software VSI handle
4609 * @tc: TC number
4610 * @q_handle: software queue handle
4611 * @num_qgrps: Number of added queue groups
4612 * @buf: list of queue groups to be added
4613 * @buf_size: size of buffer for indirect command
4614 * @cd: pointer to command details structure or NULL
4615 *
4616 * This function adds one LAN queue
4617 */
4618 int
4619 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4620 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4621 struct ice_sq_cd *cd)
4622 {
4623 struct ice_aqc_txsched_elem_data node = { 0 };
4624 struct ice_sched_node *parent;
4625 struct ice_q_ctx *q_ctx;
4626 struct ice_hw *hw;
4627 int status;
4628
4629 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4630 return -EIO;
4631
4632 if (num_qgrps > 1 || buf->num_txqs > 1)
4633 return -ENOSPC;
4634
4635 hw = pi->hw;
4636
4637 if (!ice_is_vsi_valid(hw, vsi_handle))
4638 return -EINVAL;
4639
4640 mutex_lock(&pi->sched_lock);
4641
4642 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4643 if (!q_ctx) {
4644 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4645 q_handle);
4646 status = -EINVAL;
4647 goto ena_txq_exit;
4648 }
4649
4650 /* find a parent node */
4651 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4652 ICE_SCHED_NODE_OWNER_LAN);
4653 if (!parent) {
4654 status = -EINVAL;
4655 goto ena_txq_exit;
4656 }
4657
4658 buf->parent_teid = parent->info.node_teid;
4659 node.parent_teid = parent->info.node_teid;
4660 /* Mark the values in the "generic" section as valid. The default
4661 * value in the "generic" section is zero. This means that:
4662 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4663 * - 0 priority among siblings, indicated by Bit 1-3.
4664 * - WFQ, indicated by Bit 4.
4665 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4666 * Bit 5-6.
4667 * - Bit 7 is reserved.
4668 * Without setting the generic section as valid in valid_sections, the
4669 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4670 */
4671 buf->txqs[0].info.valid_sections =
4672 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4673 ICE_AQC_ELEM_VALID_EIR;
4674 buf->txqs[0].info.generic = 0;
4675 buf->txqs[0].info.cir_bw.bw_profile_idx =
4676 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4677 buf->txqs[0].info.cir_bw.bw_alloc =
4678 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4679 buf->txqs[0].info.eir_bw.bw_profile_idx =
4680 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4681 buf->txqs[0].info.eir_bw.bw_alloc =
4682 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4683
4684 /* add the LAN queue */
4685 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4686 if (status) {
4687 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4688 le16_to_cpu(buf->txqs[0].txq_id),
4689 hw->adminq.sq_last_status);
4690 goto ena_txq_exit;
4691 }
4692
4693 node.node_teid = buf->txqs[0].q_teid;
4694 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4695 q_ctx->q_handle = q_handle;
4696 q_ctx->q_teid = le32_to_cpu(node.node_teid);
4697
4698 /* add a leaf node into scheduler tree queue layer */
4699 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
4700 if (!status)
4701 status = ice_sched_replay_q_bw(pi, q_ctx);
4702
4703 ena_txq_exit:
4704 mutex_unlock(&pi->sched_lock);
4705 return status;
4706 }
4707
4708 /**
4709 * ice_dis_vsi_txq
4710 * @pi: port information structure
4711 * @vsi_handle: software VSI handle
4712 * @tc: TC number
4713 * @num_queues: number of queues
4714 * @q_handles: pointer to software queue handle array
4715 * @q_ids: pointer to the q_id array
4716 * @q_teids: pointer to queue node teids
4717 * @rst_src: if called due to reset, specifies the reset source
4718 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4719 * @cd: pointer to command details structure or NULL
4720 *
4721 * This function removes queues and their corresponding nodes in SW DB
4722 */
4723 int
4724 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4725 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4726 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4727 struct ice_sq_cd *cd)
4728 {
4729 struct ice_aqc_dis_txq_item *qg_list;
4730 struct ice_q_ctx *q_ctx;
4731 int status = -ENOENT;
4732 struct ice_hw *hw;
4733 u16 i, buf_size;
4734
4735 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4736 return -EIO;
4737
4738 hw = pi->hw;
4739
4740 if (!num_queues) {
4741 /* if the queues are already disabled but the disable queue command
4742 * still has to be sent to complete the VF reset, then call
4743 * ice_aq_dis_lan_txq without any queue information
4744 */
4745 if (rst_src)
4746 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4747 vmvf_num, NULL);
4748 return -EIO;
4749 }
4750
4751 buf_size = struct_size(qg_list, q_id, 1);
4752 qg_list = kzalloc(buf_size, GFP_KERNEL);
4753 if (!qg_list)
4754 return -ENOMEM;
4755
4756 mutex_lock(&pi->sched_lock);
4757
4758 for (i = 0; i < num_queues; i++) {
4759 struct ice_sched_node *node;
4760
4761 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4762 if (!node)
4763 continue;
4764 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4765 if (!q_ctx) {
4766 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
4767 q_handles[i]);
4768 continue;
4769 }
4770 if (q_ctx->q_handle != q_handles[i]) {
4771 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4772 q_ctx->q_handle, q_handles[i]);
4773 continue;
4774 }
4775 qg_list->parent_teid = node->info.parent_teid;
4776 qg_list->num_qs = 1;
4777 qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
4778 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4779 vmvf_num, cd);
4780
4781 if (status)
4782 break;
4783 ice_free_sched_node(pi, node);
4784 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4785 q_ctx->q_teid = ICE_INVAL_TEID;
4786 }
4787 mutex_unlock(&pi->sched_lock);
4788 kfree(qg_list);
4789 return status;
4790 }
4791
4792 /**
4793 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4794 * @pi: port information structure
4795 * @vsi_handle: software VSI handle
4796 * @tc_bitmap: TC bitmap
4797 * @maxqs: max queues array per TC
4798 * @owner: LAN or RDMA
4799 *
4800 * This function adds/updates the VSI queues per TC.
4801 */
4802 static int
4803 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4804 u16 *maxqs, u8 owner)
4805 {
4806 int status = 0;
4807 u8 i;
4808
4809 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4810 return -EIO;
4811
4812 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4813 return -EINVAL;
4814
4815 mutex_lock(&pi->sched_lock);
4816
4817 ice_for_each_traffic_class(i) {
4818 /* configuration is possible only if TC node is present */
4819 if (!ice_sched_get_tc_node(pi, i))
4820 continue;
4821
4822 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4823 ice_is_tc_ena(tc_bitmap, i));
4824 if (status)
4825 break;
4826 }
4827
4828 mutex_unlock(&pi->sched_lock);
4829 return status;
4830 }
4831
4832 /**
4833 * ice_cfg_vsi_lan - configure VSI LAN queues
4834 * @pi: port information structure
4835 * @vsi_handle: software VSI handle
4836 * @tc_bitmap: TC bitmap
4837 * @max_lanqs: max LAN queues array per TC
4838 *
4839 * This function adds/updates the VSI LAN queues per TC.
4840 */
4841 int
4842 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4843 u16 *max_lanqs)
4844 {
4845 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4846 ICE_SCHED_NODE_OWNER_LAN);
4847 }
4848
4849 /**
4850 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
4851 * @pi: port information structure
4852 * @vsi_handle: software VSI handle
4853 * @tc_bitmap: TC bitmap
4854 * @max_rdmaqs: max RDMA queues array per TC
4855 *
4856 * This function adds/updates the VSI RDMA queues per TC.
4857 */
4858 int
4859 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4860 u16 *max_rdmaqs)
4861 {
4862 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
4863 ICE_SCHED_NODE_OWNER_RDMA);
4864 }
4865
4866 /**
4867 * ice_ena_vsi_rdma_qset
4868 * @pi: port information structure
4869 * @vsi_handle: software VSI handle
4870 * @tc: TC number
4871 * @rdma_qset: pointer to RDMA Qset
4872 * @num_qsets: number of RDMA Qsets
4873 * @qset_teid: pointer to Qset node TEIDs
4874 *
4875 * This function adds RDMA Qsets
4876 */
4877 int
4878 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
4879 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
4880 {
4881 struct ice_aqc_txsched_elem_data node = { 0 };
4882 struct ice_aqc_add_rdma_qset_data *buf;
4883 struct ice_sched_node *parent;
4884 struct ice_hw *hw;
4885 u16 i, buf_size;
4886 int ret;
4887
4888 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4889 return -EIO;
4890 hw = pi->hw;
4891
4892 if (!ice_is_vsi_valid(hw, vsi_handle))
4893 return -EINVAL;
4894
4895 buf_size = struct_size(buf, rdma_qsets, num_qsets);
4896 buf = kzalloc(buf_size, GFP_KERNEL);
4897 if (!buf)
4898 return -ENOMEM;
4899 mutex_lock(&pi->sched_lock);
4900
4901 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4902 ICE_SCHED_NODE_OWNER_RDMA);
4903 if (!parent) {
4904 ret = -EINVAL;
4905 goto rdma_error_exit;
4906 }
4907 buf->parent_teid = parent->info.node_teid;
4908 node.parent_teid = parent->info.node_teid;
4909
4910 buf->num_qsets = cpu_to_le16(num_qsets);
4911 for (i = 0; i < num_qsets; i++) {
4912 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
4913 buf->rdma_qsets[i].info.valid_sections =
4914 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4915 ICE_AQC_ELEM_VALID_EIR;
4916 buf->rdma_qsets[i].info.generic = 0;
4917 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
4918 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4919 buf->rdma_qsets[i].info.cir_bw.bw_alloc =
4920 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4921 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
4922 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4923 buf->rdma_qsets[i].info.eir_bw.bw_alloc =
4924 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4925 }
4926 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
4927 if (ret) {
4928 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
4929 goto rdma_error_exit;
4930 }
4931 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4932 for (i = 0; i < num_qsets; i++) {
4933 node.node_teid = buf->rdma_qsets[i].qset_teid;
4934 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
4935 &node, NULL);
4936 if (ret)
4937 break;
4938 qset_teid[i] = le32_to_cpu(node.node_teid);
4939 }
4940 rdma_error_exit:
4941 mutex_unlock(&pi->sched_lock);
4942 kfree(buf);
4943 return ret;
4944 }
4945
4946 /**
4947 * ice_dis_vsi_rdma_qset - free RDMA resources
4948 * @pi: port_info struct
4949 * @count: number of RDMA Qsets to free
4950 * @qset_teid: TEID of Qset node
4951 * @q_id: list of queue IDs being disabled
4952 */
4953 int
4954 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
4955 u16 *q_id)
4956 {
4957 struct ice_aqc_dis_txq_item *qg_list;
4958 struct ice_hw *hw;
4959 int status = 0;
4960 u16 qg_size;
4961 int i;
4962
4963 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4964 return -EIO;
4965
4966 hw = pi->hw;
4967
4968 qg_size = struct_size(qg_list, q_id, 1);
4969 qg_list = kzalloc(qg_size, GFP_KERNEL);
4970 if (!qg_list)
4971 return -ENOMEM;
4972
4973 mutex_lock(&pi->sched_lock);
4974
4975 for (i = 0; i < count; i++) {
4976 struct ice_sched_node *node;
4977
4978 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
4979 if (!node)
4980 continue;
4981
4982 qg_list->parent_teid = node->info.parent_teid;
4983 qg_list->num_qs = 1;
4984 qg_list->q_id[0] =
4985 cpu_to_le16(q_id[i] |
4986 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
4987
4988 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
4989 ICE_NO_RESET, 0, NULL);
4990 if (status)
4991 break;
4992
4993 ice_free_sched_node(pi, node);
4994 }
4995
4996 mutex_unlock(&pi->sched_lock);
4997 kfree(qg_list);
4998 return status;
4999 }
5000
5001 /**
5002 * ice_replay_pre_init - replay pre initialization
5003 * @hw: pointer to the HW struct
5004 *
5005 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
5006 */
5007 static int ice_replay_pre_init(struct ice_hw *hw)
5008 {
5009 struct ice_switch_info *sw = hw->switch_info;
5010 u8 i;
5011
5012 /* Delete old entries from replay filter list head if there is any */
5013 ice_rm_all_sw_replay_rule_info(hw);
5014 /* In start of replay, move entries into replay_rules list, it
5015 * will allow adding rules entries back to filt_rules list,
5016 * which is operational list.
5017 */
5018 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
5019 list_replace_init(&sw->recp_list[i].filt_rules,
5020 &sw->recp_list[i].filt_replay_rules);
5021 ice_sched_replay_agg_vsi_preinit(hw);
5022
5023 return 0;
5024 }

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restores all VSI configuration after a reset. This function must be
 * called with the main VSI first.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}
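
/* Illustrative sketch (not part of the driver): the expected replay order
 * after a reset. The main VSI must be replayed first so that
 * ice_replay_pre_init() runs before any other VSI is restored, and
 * ice_replay_post() then drops the saved replay rules. The handle-array
 * iteration shown here is hypothetical.
 */
static void __maybe_unused
ice_example_replay_all(struct ice_hw *hw, u16 *vsi_handles, u16 num_vsi)
{
	u16 i;

	/* ICE_MAIN_VSI_HANDLE must come first; see ice_replay_vsi() */
	if (ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE))
		return;

	for (i = 0; i < num_vsi; i++)
		if (vsi_handles[i] != ICE_MAIN_VSI_HANDLE)
			ice_replay_vsi(hw, vsi_handles[i]);

	ice_replay_post(hw);
}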

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the
	 * first read without adding to the statistic value so that we
	 * report stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the
	 * first read without adding to the statistic value so that we
	 * report stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
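
/* Illustrative sketch (not part of the driver): how a caller maintains a
 * software counter across reads with ice_stat_update40(). The register
 * offset and this stats struct layout are hypothetical; the driver's real
 * callers live elsewhere (e.g. the PF stats update path).
 */
struct ice_example_eth_stats {
	u64 prev_rx_bytes;	/* raw value from the previous read */
	u64 rx_bytes;		/* accumulated, rollover-corrected value */
	bool loaded;		/* false until the first read is captured */
};

static void __maybe_unused
ice_example_update_rx_bytes(struct ice_hw *hw, u32 reg_offset,
			    struct ice_example_eth_stats *s)
{
	ice_stat_update40(hw, reg_offset, s->loaded,
			  &s->prev_rx_bytes, &s->rx_bytes);
	s->loaded = true;
}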

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to store element information
 *
 * This function queries HW element information.
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}
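
/* Illustrative sketch (not part of the driver): querying one scheduler node
 * by TEID and pulling a field out of the returned element. The caller-owned
 * buffer is filled by ice_sched_query_elem(); where the TEID comes from is
 * assumed.
 */
static int __maybe_unused
ice_example_query_node_type(struct ice_hw *hw, u32 node_teid, u8 *elem_type)
{
	struct ice_aqc_txsched_elem_data buf;
	int status;

	status = ice_sched_query_elem(hw, node_teid, &buf);
	if (status)
		return status;

	*elem_type = buf.data.elem_type;
	return 0;
}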

/**
 * ice_aq_read_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *			    bits [6:5] - data offset size,
 *			    bit [4] - I2C address type,
 *			    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return -EINVAL;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}
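
/* Illustrative sketch (not part of the driver): composing the @params byte
 * for a 2-byte read at a 1-byte I2C offset. The topology address is passed
 * through untouched, so it is assumed to have been prepared by the caller;
 * @out must point to at least 2 bytes. FIELD_PREP() and the
 * ICE_AQC_I2C_DATA_SIZE_M mask are the same ones used above.
 */
static int __maybe_unused
ice_example_read_two_bytes(struct ice_hw *hw,
			   struct ice_aqc_link_topo_addr topo_addr,
			   u16 bus_addr, u8 offset, u8 *out)
{
	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 2);

	return ice_aq_read_i2c(hw, topo_addr, bus_addr,
			       cpu_to_le16(offset), params, out, NULL);
}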

/**
 * ice_aq_write_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * Return:
 * * 0       - Successful write to the i2c device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO    - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
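
/* Illustrative sketch (not part of the driver): writing a single byte. The
 * data size is encoded in bits [3:0] of @params and is capped at 4 bytes by
 * ice_aq_write_i2c() itself; the offset handling mirrors the read example.
 */
static int __maybe_unused
ice_example_write_one_byte(struct ice_hw *hw,
			   struct ice_aqc_link_topo_addr topo_addr,
			   u16 bus_addr, u8 offset, u8 val)
{
	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 1);

	return ice_aq_write_i2c(hw, topo_addr, bus_addr,
				cpu_to_le16(offset), params, &val, NULL);
}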

/**
 * ice_aq_set_driver_param - Set driver parameter to share via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to set
 * @value: the value to set the parameter to
 * @cd: pointer to command details structure or NULL
 *
 * Set the value of one of the software defined parameters. All PFs connected
 * to this device can read the value using ice_aq_get_driver_param.
 *
 * Note that firmware provides no synchronization or locking, and will not
 * save the parameter value during a device reset. It is expected that
 * a single PF will write the parameter value, while all other PFs will only
 * read it.
 */
int
ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
	cmd->param_indx = idx;
	cmd->param_val = cpu_to_le32(value);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_driver_param - Get driver parameter shared via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to retrieve
 * @value: storage to return the shared parameter
 * @cd: pointer to command details structure or NULL
 *
 * Get the value of one of the software defined parameters.
 *
 * Note that firmware provides no synchronization or locking. It is expected
 * that only a single PF will write a given parameter.
 */
int
ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;
	int status;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
	cmd->param_indx = idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = le32_to_cpu(cmd->param_val);

	return 0;
}
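
/* Illustrative sketch (not part of the driver): one PF publishing a value
 * and another PF reading it back. Firmware does not synchronize access, so
 * the convention is a single writer; the two-hw-handle framing here is
 * hypothetical (real PFs each run this from their own instance).
 */
static int __maybe_unused
ice_example_share_value(struct ice_hw *writer_hw, struct ice_hw *reader_hw,
			enum ice_aqc_driver_params idx, u32 value)
{
	u32 readback = 0;
	int status;

	status = ice_aq_set_driver_param(writer_hw, idx, value, NULL);
	if (status)
		return status;

	return ice_aq_get_driver_param(reader_hw, idx, &readback, NULL);
}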

/**
 * ice_aq_set_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO number of the GPIO that needs to be set
 * @value: SW-provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}
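
/* Illustrative sketch (not part of the driver): driving a pin high and
 * reading the value back. The controller handle and pin index come from the
 * link topology and are assumed to be known to the caller.
 */
static int __maybe_unused
ice_example_pulse_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx)
{
	bool value = false;
	int status;

	status = ice_aq_set_gpio(hw, gpio_ctrl_handle, pin_idx, true, NULL);
	if (status)
		return status;

	status = ice_aq_get_gpio(hw, gpio_ctrl_handle, pin_idx, &value, NULL);
	if (status)
		return status;

	return value ? 0 : -EIO;
}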

/**
 * ice_is_fw_api_min_ver
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API version is at least the given maj.min.patch
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}
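
/* Illustrative sketch (not part of the driver): reading the link default
 * override only when firmware reports support for it, mirroring how the
 * real callers gate this during link configuration.
 */
static int __maybe_unused
ice_example_read_ldo(struct ice_port_info *pi,
		     struct ice_link_default_override_tlv *ldo)
{
	if (!ice_fw_supports_link_override(pi->hw))
		return -EOPNOTSUPP;

	return ice_get_link_default_override(ldo, pi);
}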

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}
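
/* Illustrative sketch (not part of the driver): fetching the active PHY
 * capabilities and testing for autoneg. ice_aq_get_phy_caps() and the
 * ICE_AQC_REPORT_ACTIVE_CFG report mode are defined elsewhere in the
 * driver and are assumed here.
 */
static int __maybe_unused
ice_example_an_active(struct ice_port_info *pi, bool *an)
{
	struct ice_aqc_get_phy_caps_data caps = {};
	int status;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     &caps, NULL);
	if (status)
		return status;

	*an = ice_is_phy_caps_an_enabled(&caps);
	return 0;
}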

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer to store the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
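
/* Illustrative sketch (not part of the driver): redirecting LLDP frames to
 * a VSI only when the firmware supports the filter control command, as the
 * real callers must do.
 */
static int __maybe_unused
ice_example_add_lldp_fltr(struct ice_hw *hw, u16 vsi_num)
{
	if (!ice_fw_supports_lldp_fltr_ctrl(hw))
		return -EOPNOTSUPP;

	return ice_lldp_fltr_add_remove(hw, vsi_num, true);
}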

/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}

/* Each index into the following array matches the speed of a return value
 * from the list of AQ returned speeds in the range
 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB, excluding
 * ICE_AQ_LINK_SPEED_UNKNOWN, which is BIT(15) and would map to index 14.
 * The link_speed returned by the firmware is a 16 bit value indexed by
 * [fls(speed) - 1]; indexes beyond the table are rejected by
 * ice_get_link_speed().
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
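
/* Illustrative sketch (not part of the driver): converting a firmware
 * link_speed bitmap to Mb/s, matching the [fls(speed) - 1] indexing the
 * table above expects.
 */
static u32 __maybe_unused ice_example_aq_speed_to_mbps(u16 aq_link_speed)
{
	if (!aq_link_speed)
		return 0;

	return ice_get_link_speed(fls(aq_link_speed) - 1);
}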