// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"

#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_e810 - check if the device is E810 based
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t - check if the device is E810T based
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
		    hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
			return true;
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response. The LAN MAC address from the response is also
 * stored in the HW struct (port.mac).
 * ice_discover_dev_caps is expected to be called before this function is
 * called.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
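
/* Illustrative usage sketch (not part of the driver): a caller sizes the
 * response buffer for the up to two (LAN and WoL) addresses a port can
 * report, mirroring what ice_init_hw() does further below:
 *
 *	struct ice_aqc_manage_mac_read_resp buf[2];
 *	int err;
 *
 *	err = ice_aq_manage_mac_read(hw, buf, sizeof(buf), NULL);
 *	if (!err)
 *		ether_addr_copy(mac, hw->port_info->mac.lan_addr);
 */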

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
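
/* Illustrative usage sketch (not part of the driver): callers typically
 * allocate the sizeable pcaps structure from the heap rather than the stack,
 * as ice_init_hw() does further below:
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	int err;
 *
 *	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 *	if (!pcaps)
 *		return -ENOMEM;
 *	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *				  pcaps, NULL);
 *	kfree(pcaps);
 */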

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if a cage is present. If AQC
 * returns error (ENOENT), then no cage is present. If no cage is present,
 * then the connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present - check if a media cage is present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if a cage is present. If
	 * AQC returns error (ENOENT), then no cage is present. If no cage is
	 * present, then the connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info - get the link status of the adapter
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
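
/* Illustrative usage sketch (not part of the driver): refresh the cached link
 * state, then read the result out of the port_info structure; ICE_AQ_LINK_UP
 * is assumed here to be the link-up bit from ice_adminq_cmd.h:
 *
 *	int err = ice_aq_get_link_info(pi, false, NULL, NULL);
 *
 *	if (!err && (pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
 *		speed = pi->phy.link_info.link_speed;
 */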

/**
 * ice_fill_tx_timer_and_fc_thresh - fill Tx timer and FC refresh threshold
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg - set MAC configuration
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
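
/* Illustrative usage sketch (not part of the driver): ice_init_hw() below
 * uses this to enable jumbo frame support at the MAC level:
 *
 *	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 */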

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		if (recps[i].root_buf)
			devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static int ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	__le16 *config;
	int status;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;
	int status = 0;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return -ENOMEM;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
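
/* Illustrative usage sketch (not part of the driver): before device init, a
 * caller enables the desired transport and marks which module events should
 * be emitted; the module index (0) and event mask (0x1) below are placeholder
 * values, not real definitions:
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[0].cfg = 0x1;
 *	err = ice_cfg_fw_log(hw, true);
 */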

/**
 * ice_output_fw_log - output a FW log message
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran - determine ITR/INTRL granularities
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	u16 mac_buf_len;
	void *mac_buf;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
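
/* Illustrative usage sketch (not part of the driver): ice_init_hw() and
 * ice_deinit_hw() are paired across the device's lifetime, typically from
 * the PCI probe and remove paths:
 *
 *	err = ice_init_hw(hw);
 *	if (err)
 *		return err;
 *	(... device operation ...)
 *	ice_deinit_hw(hw);
 */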

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw - copy Rx queue context to HW registers
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx - write Rx queue context to hardware
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
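
/* Illustrative usage sketch (not part of the driver): fill the sparse context
 * structure and let ice_write_rxq_ctx() pack and program it; the field values
 * below are placeholders (in the real driver the ring base address is shifted
 * into 128-byte units first):
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;
 *	rlan_ctx.qlen = ring_count;
 *	err = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */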

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
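
/* Illustrative usage sketch (not part of the driver): a zero opcode requests
 * a read and the result comes back in in.data; the destination device and
 * register address below are placeholders:
 *
 *	struct ice_sbq_msg_input in = { 0 };
 *
 *	in.dest_dev = 0;
 *	in.msg_addr_low = 0x1000;
 *	in.opcode = 0;
 *	err = ice_sbq_rw_reg(hw, &in);
 */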
1413
1414 /* FW Admin Queue command wrappers */
1415
1416 /* Software lock/mutex that is meant to be held while the Global Config Lock
1417 * in firmware is acquired by the software to prevent most (but not all) types
1418 * of AQ commands from being sent to FW
1419 */
1420 DEFINE_MUTEX(ice_global_cfg_lock_sw);
1421
1422 /**
1423 * ice_should_retry_sq_send_cmd
1424 * @opcode: AQ opcode
1425 *
1426 * Decide if we should retry the send command routine for the ATQ, depending
1427 * on the opcode.
1428 */
ice_should_retry_sq_send_cmd(u16 opcode)1429 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1430 {
1431 switch (opcode) {
1432 case ice_aqc_opc_get_link_topo:
1433 case ice_aqc_opc_lldp_stop:
1434 case ice_aqc_opc_lldp_start:
1435 case ice_aqc_opc_lldp_filter_ctrl:
1436 return true;
1437 }
1438
1439 return false;
1440 }
1441
1442 /**
1443 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1444 * @hw: pointer to the HW struct
1445 * @cq: pointer to the specific Control queue
1446 * @desc: prefilled descriptor describing the command
1447 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1448 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1449 * @cd: pointer to command details structure
1450 *
1451 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
1452 * Queue if the EBUSY AQ error is returned.
1453 */
1454 static int
ice_sq_send_cmd_retry(struct ice_hw * hw,struct ice_ctl_q_info * cq,struct ice_aq_desc * desc,void * buf,u16 buf_size,struct ice_sq_cd * cd)1455 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1456 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1457 struct ice_sq_cd *cd)
1458 {
1459 struct ice_aq_desc desc_cpy;
1460 bool is_cmd_for_retry;
1461 u8 *buf_cpy = NULL;
1462 u8 idx = 0;
1463 u16 opcode;
1464 int status;
1465
1466 opcode = le16_to_cpu(desc->opcode);
1467 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1468 memset(&desc_cpy, 0, sizeof(desc_cpy));
1469
1470 if (is_cmd_for_retry) {
1471 if (buf) {
1472 buf_cpy = kzalloc(buf_size, GFP_KERNEL);
1473 if (!buf_cpy)
1474 return -ENOMEM;
1475 }
1476
1477 memcpy(&desc_cpy, desc, sizeof(desc_cpy));
1478 }
1479
1480 do {
1481 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1482
1483 if (!is_cmd_for_retry || !status ||
1484 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1485 break;
1486
1487 if (buf_cpy)
1488 memcpy(buf, buf_cpy, buf_size);
1489
1490 memcpy(desc, &desc_cpy, sizeof(desc_cpy));
1491
1492 mdelay(ICE_SQ_SEND_DELAY_TIME_MS);
1493
1494 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1495
1496 kfree(buf_cpy);
1497
1498 return status;
1499 }
1500
1501 /**
1502 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1503 * @hw: pointer to the HW struct
1504 * @desc: descriptor describing the command
1505 * @buf: buffer to use for indirect commands (NULL for direct commands)
1506 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1507 * @cd: pointer to command details structure
1508 *
1509 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1510 */
1511 int
ice_aq_send_cmd(struct ice_hw * hw,struct ice_aq_desc * desc,void * buf,u16 buf_size,struct ice_sq_cd * cd)1512 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1513 u16 buf_size, struct ice_sq_cd *cd)
1514 {
1515 struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1516 bool lock_acquired = false;
1517 int status;
1518
1519 /* When a package download is in process (i.e. when the firmware's
1520 * Global Configuration Lock resource is held), only the Download
1521 * Package, Get Version, Get Package Info List, Upload Section,
1522 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
1523 * Add Recipe, Set Recipes to Profile Association, Get Recipe, and Get
1524 * Recipes to Profile Association, and Release Resource (with resource
1525 * ID set to Global Config Lock) AdminQ commands are allowed; all others
1526 * must block until the package download completes and the Global Config
1527 * Lock is released. See also ice_acquire_global_cfg_lock().
1528 */
1529 switch (le16_to_cpu(desc->opcode)) {
1530 case ice_aqc_opc_download_pkg:
1531 case ice_aqc_opc_get_pkg_info_list:
1532 case ice_aqc_opc_get_ver:
1533 case ice_aqc_opc_upload_section:
1534 case ice_aqc_opc_update_pkg:
1535 case ice_aqc_opc_set_port_params:
1536 case ice_aqc_opc_get_vlan_mode_parameters:
1537 case ice_aqc_opc_set_vlan_mode_parameters:
1538 case ice_aqc_opc_add_recipe:
1539 case ice_aqc_opc_recipe_to_profile:
1540 case ice_aqc_opc_get_recipe:
1541 case ice_aqc_opc_get_recipe_to_profile:
1542 break;
1543 case ice_aqc_opc_release_res:
1544 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1545 break;
1546 fallthrough;
1547 default:
1548 mutex_lock(&ice_global_cfg_lock_sw);
1549 lock_acquired = true;
1550 break;
1551 }
1552
1553 status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1554 if (lock_acquired)
1555 mutex_unlock(&ice_global_cfg_lock_sw);
1556
1557 return status;
1558 }
1559
1560 /**
1561 * ice_aq_get_fw_ver
1562 * @hw: pointer to the HW struct
1563 * @cd: pointer to command details structure or NULL
1564 *
1565 * Get the firmware version (0x0001) from the admin queue commands
1566 */
ice_aq_get_fw_ver(struct ice_hw * hw,struct ice_sq_cd * cd)1567 int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1568 {
1569 struct ice_aqc_get_ver *resp;
1570 struct ice_aq_desc desc;
1571 int status;
1572
1573 resp = &desc.params.get_ver;
1574
1575 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1576
1577 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1578
1579 if (!status) {
1580 hw->fw_branch = resp->fw_branch;
1581 hw->fw_maj_ver = resp->fw_major;
1582 hw->fw_min_ver = resp->fw_minor;
1583 hw->fw_patch = resp->fw_patch;
1584 hw->fw_build = le32_to_cpu(resp->fw_build);
1585 hw->api_branch = resp->api_branch;
1586 hw->api_maj_ver = resp->api_major;
1587 hw->api_min_ver = resp->api_minor;
1588 hw->api_patch = resp->api_patch;
1589 }
1590
1591 return status;
1592 }
1593
1594 /**
1595 * ice_aq_send_driver_ver
1596 * @hw: pointer to the HW struct
1597 * @dv: driver's major, minor version
1598 * @cd: pointer to command details structure or NULL
1599 *
1600 * Send the driver version (0x0002) to the firmware
1601 */
1602 int
ice_aq_send_driver_ver(struct ice_hw * hw,struct ice_driver_ver * dv,struct ice_sq_cd * cd)1603 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1604 struct ice_sq_cd *cd)
1605 {
1606 struct ice_aqc_driver_ver *cmd;
1607 struct ice_aq_desc desc;
1608 u16 len;
1609
1610 cmd = &desc.params.driver_ver;
1611
1612 if (!dv)
1613 return -EINVAL;
1614
1615 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1616
1617 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1618 cmd->major_ver = dv->major_ver;
1619 cmd->minor_ver = dv->minor_ver;
1620 cmd->build_ver = dv->build_ver;
1621 cmd->subbuild_ver = dv->subbuild_ver;
1622
1623 len = 0;
1624 while (len < sizeof(dv->driver_string) &&
1625 isascii(dv->driver_string[len]) && dv->driver_string[len])
1626 len++;
1627
1628 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1629 }
1630
1631 /**
1632 * ice_aq_q_shutdown
1633 * @hw: pointer to the HW struct
1634 * @unloading: is the driver unloading itself
1635 *
1636 * Tell the Firmware that we're shutting down the AdminQ and whether
1637 * or not the driver is unloading as well (0x0003).
1638 */
ice_aq_q_shutdown(struct ice_hw * hw,bool unloading)1639 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1640 {
1641 struct ice_aqc_q_shutdown *cmd;
1642 struct ice_aq_desc desc;
1643
1644 cmd = &desc.params.q_shutdown;
1645
1646 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1647
1648 if (unloading)
1649 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1650
1651 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1652 }
1653
1654 /**
1655 * ice_aq_req_res
1656 * @hw: pointer to the HW struct
1657 * @res: resource ID
1658 * @access: access type
1659 * @sdp_number: resource number
1660 * @timeout: the maximum time in ms that the driver may hold the resource
1661 * @cd: pointer to command details structure or NULL
1662 *
1663 * Requests common resource using the admin queue commands (0x0008).
1664 * When attempting to acquire the Global Config Lock, the driver can
1665 * learn of three states:
1666 * 1) 0 - acquired lock, and can perform download package
1667 * 2) -EIO - did not get lock, driver should fail to load
1668 * 3) -EALREADY - did not get lock, but another driver has
1669 * successfully downloaded the package; the driver does
1670 * not have to download the package and can continue
1671 * loading
1672 *
1673 * Note that if the caller is in an acquire lock, perform action, release lock
1674 * phase of operation, it is possible that the FW may detect a timeout and issue
1675 * a CORER. In this case, the driver will receive a CORER interrupt and will
1676 * have to determine its cause. The calling thread that is handling this flow
1677 * will likely get an error propagated back to it indicating the Download
1678 * Package, Update Package or the Release Resource AQ commands timed out.
1679 */
1680 static int
ice_aq_req_res(struct ice_hw * hw,enum ice_aq_res_ids res,enum ice_aq_res_access_type access,u8 sdp_number,u32 * timeout,struct ice_sq_cd * cd)1681 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1682 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1683 struct ice_sq_cd *cd)
1684 {
1685 struct ice_aqc_req_res *cmd_resp;
1686 struct ice_aq_desc desc;
1687 int status;
1688
1689 cmd_resp = &desc.params.res_owner;
1690
1691 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1692
1693 cmd_resp->res_id = cpu_to_le16(res);
1694 cmd_resp->access_type = cpu_to_le16(access);
1695 cmd_resp->res_number = cpu_to_le32(sdp_number);
1696 cmd_resp->timeout = cpu_to_le32(*timeout);
1697 *timeout = 0;
1698
1699 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1700
1701 /* The completion specifies the maximum time in ms that the driver
1702 * may hold the resource in the Timeout field.
1703 */
1704
1705 /* Global config lock response utilizes an additional status field.
1706 *
1707 * If the Global config lock resource is held by some other driver, the
1708 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1709 * and the timeout field indicates the maximum time the current owner
1710 * of the resource has to free it.
1711 */
1712 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1713 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1714 *timeout = le32_to_cpu(cmd_resp->timeout);
1715 return 0;
1716 } else if (le16_to_cpu(cmd_resp->status) ==
1717 ICE_AQ_RES_GLBL_IN_PROG) {
1718 *timeout = le32_to_cpu(cmd_resp->timeout);
1719 return -EIO;
1720 } else if (le16_to_cpu(cmd_resp->status) ==
1721 ICE_AQ_RES_GLBL_DONE) {
1722 return -EALREADY;
1723 }
1724
1725 /* invalid FW response, force a timeout immediately */
1726 *timeout = 0;
1727 return -EIO;
1728 }
1729
1730 /* If the resource is held by some other driver, the command completes
1731 * with a busy return value and the timeout field indicates the maximum
1732 * time the current owner of the resource has to free it.
1733 */
1734 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1735 *timeout = le32_to_cpu(cmd_resp->timeout);
1736
1737 return status;
1738 }
1739
1740 /**
1741 * ice_aq_release_res
1742 * @hw: pointer to the HW struct
1743 * @res: resource ID
1744 * @sdp_number: resource number
1745 * @cd: pointer to command details structure or NULL
1746 *
1747 * release common resource using the admin queue commands (0x0009)
1748 */
1749 static int
1750 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1751 struct ice_sq_cd *cd)
1752 {
1753 struct ice_aqc_req_res *cmd;
1754 struct ice_aq_desc desc;
1755
1756 cmd = &desc.params.res_owner;
1757
1758 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1759
1760 cmd->res_id = cpu_to_le16(res);
1761 cmd->res_number = cpu_to_le32(sdp_number);
1762
1763 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1764 }
1765
1766 /**
1767 * ice_acquire_res
1768 * @hw: pointer to the HW structure
1769 * @res: resource ID
1770 * @access: access type (read or write)
1771 * @timeout: timeout in milliseconds
1772 *
1773 * This function will attempt to acquire the ownership of a resource.
1774 */
1775 int
1776 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1777 enum ice_aq_res_access_type access, u32 timeout)
1778 {
1779 #define ICE_RES_POLLING_DELAY_MS 10
1780 u32 delay = ICE_RES_POLLING_DELAY_MS;
1781 u32 time_left = timeout;
1782 int status;
1783
1784 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1785
1786 /* A return code of -EALREADY means that another driver has
1787 * previously acquired the resource and performed any necessary updates;
1788 * in this case the caller does not obtain the resource and has no
1789 * further work to do.
1790 */
1791 if (status == -EALREADY)
1792 goto ice_acquire_res_exit;
1793
1794 if (status)
1795 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1796
1797 /* If necessary, poll until the current lock owner times out */
1798 timeout = time_left;
1799 while (status && timeout && time_left) {
1800 mdelay(delay);
1801 timeout = (timeout > delay) ? timeout - delay : 0;
1802 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1803
1804 if (status == -EALREADY)
1805 /* lock free, but no work to do */
1806 break;
1807
1808 if (!status)
1809 /* lock acquired */
1810 break;
1811 }
1812 if (status && status != -EALREADY)
1813 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1814
1815 ice_acquire_res_exit:
1816 if (status == -EALREADY) {
1817 if (access == ICE_RES_WRITE)
1818 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1819 else
1820 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
1821 }
1822 return status;
1823 }
1824
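/* Example (illustrative only, not part of this file): the three-state
 * handling described above for the Global Config Lock. The 3000 ms
 * timeout is an assumed value for this sketch; the real package-download
 * path defines its own timeout.
 */
static int ice_example_global_cfg_lock(struct ice_hw *hw)
{
	int status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
				 ICE_RES_WRITE, 3000);
	if (!status) {
		/* 1) lock acquired: safe to download the package */
		ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
		return 0;
	}
	if (status == -EALREADY)
		/* 3) another driver already downloaded the package */
		return 0;

	/* 2) did not get the lock: the driver should fail to load */
	return status;
}
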
1825 /**
1826 * ice_release_res
1827 * @hw: pointer to the HW structure
1828 * @res: resource ID
1829 *
1830 * This function will release a resource using the proper Admin Command.
1831 */
1832 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1833 {
1834 u32 total_delay = 0;
1835 int status;
1836
1837 status = ice_aq_release_res(hw, res, 0, NULL);
1838
1839 /* there are some rare cases when trying to release the resource
1840 * results in an admin queue timeout, so handle them correctly
1841 */
1842 while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) {
1843 mdelay(1);
1844 status = ice_aq_release_res(hw, res, 0, NULL);
1845 total_delay++;
1846 }
1847 }
1848
1849 /**
1850 * ice_aq_alloc_free_res - command to allocate/free resources
1851 * @hw: pointer to the HW struct
1852 * @num_entries: number of resource entries in buffer
1853 * @buf: Indirect buffer to hold data parameters and response
1854 * @buf_size: size of buffer for indirect commands
1855 * @opc: pass in the command opcode
1856 * @cd: pointer to command details structure or NULL
1857 *
1858 * Helper function to allocate/free resources using the admin queue commands
1859 */
1860 int
1861 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1862 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1863 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1864 {
1865 struct ice_aqc_alloc_free_res_cmd *cmd;
1866 struct ice_aq_desc desc;
1867
1868 cmd = &desc.params.sw_res_ctrl;
1869
1870 if (!buf)
1871 return -EINVAL;
1872
1873 if (buf_size < flex_array_size(buf, elem, num_entries))
1874 return -EINVAL;
1875
1876 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1877
1878 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1879
1880 cmd->num_entries = cpu_to_le16(num_entries);
1881
1882 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1883 }
1884
1885 /**
1886 * ice_alloc_hw_res - allocate resource
1887 * @hw: pointer to the HW struct
1888 * @type: type of resource
1889 * @num: number of resources to allocate
1890 * @btm: allocate from bottom
1891 * @res: pointer to array that will receive the resources
1892 */
1893 int
1894 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1895 {
1896 struct ice_aqc_alloc_free_res_elem *buf;
1897 u16 buf_len;
1898 int status;
1899
1900 buf_len = struct_size(buf, elem, num);
1901 buf = kzalloc(buf_len, GFP_KERNEL);
1902 if (!buf)
1903 return -ENOMEM;
1904
1905 /* Prepare buffer to allocate resource. */
1906 buf->num_elems = cpu_to_le16(num);
1907 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1908 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1909 if (btm)
1910 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1911
1912 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1913 ice_aqc_opc_alloc_res, NULL);
1914 if (status)
1915 goto ice_alloc_res_exit;
1916
1917 memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1918
1919 ice_alloc_res_exit:
1920 kfree(buf);
1921 return status;
1922 }
1923
1924 /**
1925 * ice_free_hw_res - free allocated HW resource
1926 * @hw: pointer to the HW struct
1927 * @type: type of resource to free
1928 * @num: number of resources
1929 * @res: pointer to array that contains the resources to free
1930 */
1931 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1932 {
1933 struct ice_aqc_alloc_free_res_elem *buf;
1934 u16 buf_len;
1935 int status;
1936
1937 buf_len = struct_size(buf, elem, num);
1938 buf = kzalloc(buf_len, GFP_KERNEL);
1939 if (!buf)
1940 return -ENOMEM;
1941
1942 /* Prepare buffer to free resource. */
1943 buf->num_elems = cpu_to_le16(num);
1944 buf->res_type = cpu_to_le16(type);
1945 memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1946
1947 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1948 ice_aqc_opc_free_res, NULL);
1949 if (status)
1950 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1951
1952 kfree(buf);
1953 return status;
1954 }
1955
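/* Example (illustrative only, not part of this file): ice_alloc_hw_res()
 * and ice_free_hw_res() are used as a pair. The type argument is whichever
 * ICE_AQC_RES_TYPE_* value the caller needs; two entries are requested
 * here purely for the sketch.
 */
static int ice_example_alloc_then_free(struct ice_hw *hw, u16 type)
{
	u16 res_ids[2];
	int status;

	/* scan from the top of the resource space (btm == false) */
	status = ice_alloc_hw_res(hw, type, 2, false, res_ids);
	if (status)
		return status;

	/* ... program hardware using res_ids[0] and res_ids[1] ... */

	return ice_free_hw_res(hw, type, 2, res_ids);
}
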
1956 /**
1957 * ice_get_num_per_func - determine number of resources per PF
1958 * @hw: pointer to the HW structure
1959 * @max: value to be evenly split between each PF
1960 *
1961 * Determine the number of valid functions by going through the bitmap returned
1962 * from parsing capabilities and use this to calculate the number of resources
1963 * per PF based on the max value passed in.
1964 */
1965 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1966 {
1967 u8 funcs;
1968
1969 #define ICE_CAPS_VALID_FUNCS_M 0xFF
1970 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1971 ICE_CAPS_VALID_FUNCS_M);
1972
1973 if (!funcs)
1974 return 0;
1975
1976 return max / funcs;
1977 }
1978
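/* Worked example (illustrative): with valid_functions = 0x0F (four PFs)
 * and max = ICE_MAX_VSI (768 on this device family), each PF receives
 * 768 / 4 = 192 resources; ice_parse_vsi_func_caps() below uses exactly
 * this split to derive guar_num_vsi.
 */
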
1979 /**
1980 * ice_parse_common_caps - parse common device/function capabilities
1981 * @hw: pointer to the HW struct
1982 * @caps: pointer to common capabilities structure
1983 * @elem: the capability element to parse
1984 * @prefix: message prefix for tracing capabilities
1985 *
1986 * Given a capability element, extract relevant details into the common
1987 * capability structure.
1988 *
1989 * Returns: true if the capability matches one of the common capability ids,
1990 * false otherwise.
1991 */
1992 static bool
1993 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1994 struct ice_aqc_list_caps_elem *elem, const char *prefix)
1995 {
1996 u32 logical_id = le32_to_cpu(elem->logical_id);
1997 u32 phys_id = le32_to_cpu(elem->phys_id);
1998 u32 number = le32_to_cpu(elem->number);
1999 u16 cap = le16_to_cpu(elem->cap);
2000 bool found = true;
2001
2002 switch (cap) {
2003 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2004 caps->valid_functions = number;
2005 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2006 caps->valid_functions);
2007 break;
2008 case ICE_AQC_CAPS_SRIOV:
2009 caps->sr_iov_1_1 = (number == 1);
2010 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2011 caps->sr_iov_1_1);
2012 break;
2013 case ICE_AQC_CAPS_DCB:
2014 caps->dcb = (number == 1);
2015 caps->active_tc_bitmap = logical_id;
2016 caps->maxtc = phys_id;
2017 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2018 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2019 caps->active_tc_bitmap);
2020 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2021 break;
2022 case ICE_AQC_CAPS_RSS:
2023 caps->rss_table_size = number;
2024 caps->rss_table_entry_width = logical_id;
2025 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2026 caps->rss_table_size);
2027 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2028 caps->rss_table_entry_width);
2029 break;
2030 case ICE_AQC_CAPS_RXQS:
2031 caps->num_rxq = number;
2032 caps->rxq_first_id = phys_id;
2033 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2034 caps->num_rxq);
2035 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2036 caps->rxq_first_id);
2037 break;
2038 case ICE_AQC_CAPS_TXQS:
2039 caps->num_txq = number;
2040 caps->txq_first_id = phys_id;
2041 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2042 caps->num_txq);
2043 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2044 caps->txq_first_id);
2045 break;
2046 case ICE_AQC_CAPS_MSIX:
2047 caps->num_msix_vectors = number;
2048 caps->msix_vector_first_id = phys_id;
2049 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2050 caps->num_msix_vectors);
2051 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2052 caps->msix_vector_first_id);
2053 break;
2054 case ICE_AQC_CAPS_PENDING_NVM_VER:
2055 caps->nvm_update_pending_nvm = true;
2056 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
2057 break;
2058 case ICE_AQC_CAPS_PENDING_OROM_VER:
2059 caps->nvm_update_pending_orom = true;
2060 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
2061 break;
2062 case ICE_AQC_CAPS_PENDING_NET_VER:
2063 caps->nvm_update_pending_netlist = true;
2064 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
2065 break;
2066 case ICE_AQC_CAPS_NVM_MGMT:
2067 caps->nvm_unified_update =
2068 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2069 true : false;
2070 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2071 caps->nvm_unified_update);
2072 break;
2073 case ICE_AQC_CAPS_RDMA:
2074 caps->rdma = (number == 1);
2075 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
2076 break;
2077 case ICE_AQC_CAPS_MAX_MTU:
2078 caps->max_mtu = number;
2079 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2080 prefix, caps->max_mtu);
2081 break;
2082 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2083 caps->pcie_reset_avoidance = (number > 0);
2084 ice_debug(hw, ICE_DBG_INIT,
2085 "%s: pcie_reset_avoidance = %d\n", prefix,
2086 caps->pcie_reset_avoidance);
2087 break;
2088 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2089 caps->reset_restrict_support = (number == 1);
2090 ice_debug(hw, ICE_DBG_INIT,
2091 "%s: reset_restrict_support = %d\n", prefix,
2092 caps->reset_restrict_support);
2093 break;
2094 default:
2095 /* Not one of the recognized common capabilities */
2096 found = false;
2097 }
2098
2099 return found;
2100 }
2101
2102 /**
2103 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2104 * @hw: pointer to the HW structure
2105 * @caps: pointer to capabilities structure to fix
2106 *
2107 * Re-calculate the capabilities that are dependent on the number of physical
2108 * ports; i.e. some features are not supported or function differently on
2109 * devices with more than 4 ports.
2110 */
2111 static void
2112 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2113 {
2114 /* This assumes device capabilities are always scanned before function
2115 * capabilities during the initialization flow.
2116 */
2117 if (hw->dev_caps.num_funcs > 4) {
2118 /* Max 4 TCs per port */
2119 caps->maxtc = 4;
2120 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2121 caps->maxtc);
2122 if (caps->rdma) {
2123 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2124 caps->rdma = 0;
2125 }
2126
2127 /* print message only when processing device capabilities
2128 * during initialization.
2129 */
2130 if (caps == &hw->dev_caps.common_cap)
2131 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
2132 }
2133 }
2134
2135 /**
2136 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2137 * @hw: pointer to the HW struct
2138 * @func_p: pointer to function capabilities structure
2139 * @cap: pointer to the capability element to parse
2140 *
2141 * Extract function capabilities for ICE_AQC_CAPS_VF.
2142 */
2143 static void
2144 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2145 struct ice_aqc_list_caps_elem *cap)
2146 {
2147 u32 logical_id = le32_to_cpu(cap->logical_id);
2148 u32 number = le32_to_cpu(cap->number);
2149
2150 func_p->num_allocd_vfs = number;
2151 func_p->vf_base_id = logical_id;
2152 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2153 func_p->num_allocd_vfs);
2154 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2155 func_p->vf_base_id);
2156 }
2157
2158 /**
2159 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2160 * @hw: pointer to the HW struct
2161 * @func_p: pointer to function capabilities structure
2162 * @cap: pointer to the capability element to parse
2163 *
2164 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2165 */
2166 static void
2167 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2168 struct ice_aqc_list_caps_elem *cap)
2169 {
2170 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2171 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2172 le32_to_cpu(cap->number));
2173 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2174 func_p->guar_num_vsi);
2175 }
2176
2177 /**
2178 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
2179 * @hw: pointer to the HW struct
2180 * @func_p: pointer to function capabilities structure
2181 * @cap: pointer to the capability element to parse
2182 *
2183 * Extract function capabilities for ICE_AQC_CAPS_1588.
2184 */
2185 static void
2186 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2187 struct ice_aqc_list_caps_elem *cap)
2188 {
2189 struct ice_ts_func_info *info = &func_p->ts_func_info;
2190 u32 number = le32_to_cpu(cap->number);
2191
2192 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2193 func_p->common_cap.ieee_1588 = info->ena;
2194
2195 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2196 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2197 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2198 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2199
2200 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
2201 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2202
2203 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
2204 info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
2205 } else {
2206 /* Unknown clock frequency, so assume a (probably incorrect)
2207 * default to avoid out-of-bounds lookups of
2208 * frequency-related information.
2209 */
2210 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
2211 info->clk_freq);
2212 info->time_ref = ICE_TIME_REF_FREQ_25_000;
2213 }
2214
2215 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2216 func_p->common_cap.ieee_1588);
2217 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2218 info->src_tmr_owned);
2219 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2220 info->tmr_ena);
2221 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2222 info->tmr_index_owned);
2223 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2224 info->tmr_index_assoc);
2225 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2226 info->clk_freq);
2227 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2228 info->clk_src);
2229 }
2230
2231 /**
2232 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2233 * @hw: pointer to the HW struct
2234 * @func_p: pointer to function capabilities structure
2235 *
2236 * Extract function capabilities for ICE_AQC_CAPS_FD.
2237 */
2238 static void
2239 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2240 {
2241 u32 reg_val, val;
2242
2243 reg_val = rd32(hw, GLQF_FD_SIZE);
2244 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2245 GLQF_FD_SIZE_FD_GSIZE_S;
2246 func_p->fd_fltr_guar =
2247 ice_get_num_per_func(hw, val);
2248 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2249 GLQF_FD_SIZE_FD_BSIZE_S;
2250 func_p->fd_fltr_best_effort = val;
2251
2252 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2253 func_p->fd_fltr_guar);
2254 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2255 func_p->fd_fltr_best_effort);
2256 }
2257
2258 /**
2259 * ice_parse_func_caps - Parse function capabilities
2260 * @hw: pointer to the HW struct
2261 * @func_p: pointer to function capabilities structure
2262 * @buf: buffer containing the function capability records
2263 * @cap_count: the number of capabilities
2264 *
2265 * Helper function to parse the function (0x000A) capabilities list. For
2266 * capabilities shared between device and function, this relies on
2267 * ice_parse_common_caps.
2268 *
2269 * Loop through the list of provided capabilities and extract the relevant
2270 * data into the function capabilities structure.
2271 */
2272 static void
2273 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2274 void *buf, u32 cap_count)
2275 {
2276 struct ice_aqc_list_caps_elem *cap_resp;
2277 u32 i;
2278
2279 cap_resp = buf;
2280
2281 memset(func_p, 0, sizeof(*func_p));
2282
2283 for (i = 0; i < cap_count; i++) {
2284 u16 cap = le16_to_cpu(cap_resp[i].cap);
2285 bool found;
2286
2287 found = ice_parse_common_caps(hw, &func_p->common_cap,
2288 &cap_resp[i], "func caps");
2289
2290 switch (cap) {
2291 case ICE_AQC_CAPS_VF:
2292 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2293 break;
2294 case ICE_AQC_CAPS_VSI:
2295 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2296 break;
2297 case ICE_AQC_CAPS_1588:
2298 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2299 break;
2300 case ICE_AQC_CAPS_FD:
2301 ice_parse_fdir_func_caps(hw, func_p);
2302 break;
2303 default:
2304 /* Don't list common capabilities as unknown */
2305 if (!found)
2306 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2307 i, cap);
2308 break;
2309 }
2310 }
2311
2312 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2313 }
2314
2315 /**
2316 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2317 * @hw: pointer to the HW struct
2318 * @dev_p: pointer to device capabilities structure
2319 * @cap: capability element to parse
2320 *
2321 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2322 */
2323 static void
2324 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2325 struct ice_aqc_list_caps_elem *cap)
2326 {
2327 u32 number = le32_to_cpu(cap->number);
2328
2329 dev_p->num_funcs = hweight32(number);
2330 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2331 dev_p->num_funcs);
2332 }
2333
2334 /**
2335 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2336 * @hw: pointer to the HW struct
2337 * @dev_p: pointer to device capabilities structure
2338 * @cap: capability element to parse
2339 *
2340 * Parse ICE_AQC_CAPS_VF for device capabilities.
2341 */
2342 static void
2343 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2344 struct ice_aqc_list_caps_elem *cap)
2345 {
2346 u32 number = le32_to_cpu(cap->number);
2347
2348 dev_p->num_vfs_exposed = number;
2349 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2350 dev_p->num_vfs_exposed);
2351 }
2352
2353 /**
2354 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2355 * @hw: pointer to the HW struct
2356 * @dev_p: pointer to device capabilities structure
2357 * @cap: capability element to parse
2358 *
2359 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2360 */
2361 static void
2362 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2363 struct ice_aqc_list_caps_elem *cap)
2364 {
2365 u32 number = le32_to_cpu(cap->number);
2366
2367 dev_p->num_vsi_allocd_to_host = number;
2368 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2369 dev_p->num_vsi_allocd_to_host);
2370 }
2371
2372 /**
2373 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
2374 * @hw: pointer to the HW struct
2375 * @dev_p: pointer to device capabilities structure
2376 * @cap: capability element to parse
2377 *
2378 * Parse ICE_AQC_CAPS_1588 for device capabilities.
2379 */
2380 static void
2381 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2382 struct ice_aqc_list_caps_elem *cap)
2383 {
2384 struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2385 u32 logical_id = le32_to_cpu(cap->logical_id);
2386 u32 phys_id = le32_to_cpu(cap->phys_id);
2387 u32 number = le32_to_cpu(cap->number);
2388
2389 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2390 dev_p->common_cap.ieee_1588 = info->ena;
2391
2392 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2393 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2394 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2395
2396 info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
2397 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2398 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2399
2400 info->ena_ports = logical_id;
2401 info->tmr_own_map = phys_id;
2402
2403 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2404 dev_p->common_cap.ieee_1588);
2405 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2406 info->tmr0_owner);
2407 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2408 info->tmr0_owned);
2409 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2410 info->tmr0_ena);
2411 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2412 info->tmr1_owner);
2413 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2414 info->tmr1_owned);
2415 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2416 info->tmr1_ena);
2417 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2418 info->ena_ports);
2419 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2420 info->tmr_own_map);
2421 }
2422
2423 /**
2424 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2425 * @hw: pointer to the HW struct
2426 * @dev_p: pointer to device capabilities structure
2427 * @cap: capability element to parse
2428 *
2429 * Parse ICE_AQC_CAPS_FD for device capabilities.
2430 */
2431 static void
2432 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2433 struct ice_aqc_list_caps_elem *cap)
2434 {
2435 u32 number = le32_to_cpu(cap->number);
2436
2437 dev_p->num_flow_director_fltr = number;
2438 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2439 dev_p->num_flow_director_fltr);
2440 }
2441
2442 /**
2443 * ice_parse_dev_caps - Parse device capabilities
2444 * @hw: pointer to the HW struct
2445 * @dev_p: pointer to device capabilities structure
2446 * @buf: buffer containing the device capability records
2447 * @cap_count: the number of capabilities
2448 *
2449 * Helper function to parse the device (0x000B) capabilities list. For
2450 * capabilities shared between device and function, this relies on
2451 * ice_parse_common_caps.
2452 *
2453 * Loop through the list of provided capabilities and extract the relevant
2454 * data into the device capabilities structure.
2455 */
2456 static void
2457 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2458 void *buf, u32 cap_count)
2459 {
2460 struct ice_aqc_list_caps_elem *cap_resp;
2461 u32 i;
2462
2463 cap_resp = buf;
2464
2465 memset(dev_p, 0, sizeof(*dev_p));
2466
2467 for (i = 0; i < cap_count; i++) {
2468 u16 cap = le16_to_cpu(cap_resp[i].cap);
2469 bool found;
2470
2471 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2472 &cap_resp[i], "dev caps");
2473
2474 switch (cap) {
2475 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2476 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2477 break;
2478 case ICE_AQC_CAPS_VF:
2479 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2480 break;
2481 case ICE_AQC_CAPS_VSI:
2482 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2483 break;
2484 case ICE_AQC_CAPS_1588:
2485 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2486 break;
2487 case ICE_AQC_CAPS_FD:
2488 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2489 break;
2490 default:
2491 /* Don't list common capabilities as unknown */
2492 if (!found)
2493 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2494 i, cap);
2495 break;
2496 }
2497 }
2498
2499 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2500 }
2501
2502 /**
2503 * ice_aq_list_caps - query function/device capabilities
2504 * @hw: pointer to the HW struct
2505 * @buf: a buffer to hold the capabilities
2506 * @buf_size: size of the buffer
2507 * @cap_count: if not NULL, set to the number of capabilities reported
2508 * @opc: capabilities type to discover, device or function
2509 * @cd: pointer to command details structure or NULL
2510 *
2511 * Get the function (0x000A) or device (0x000B) capabilities description from
2512 * firmware and store it in the buffer.
2513 *
2514 * If the cap_count pointer is not NULL, then it is set to the number of
2515 * capabilities firmware will report. Note that if the buffer size is too
2516 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2517 * cap_count will still be updated in this case. It is recommended that the
2518 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2519 * firmware could return) to avoid this.
2520 */
2521 int
2522 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2523 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2524 {
2525 struct ice_aqc_list_caps *cmd;
2526 struct ice_aq_desc desc;
2527 int status;
2528
2529 cmd = &desc.params.get_cap;
2530
2531 if (opc != ice_aqc_opc_list_func_caps &&
2532 opc != ice_aqc_opc_list_dev_caps)
2533 return -EINVAL;
2534
2535 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2536 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2537
2538 if (cap_count)
2539 *cap_count = le32_to_cpu(cmd->count);
2540
2541 return status;
2542 }
2543
2544 /**
2545 * ice_discover_dev_caps - Read and extract device capabilities
2546 * @hw: pointer to the hardware structure
2547 * @dev_caps: pointer to device capabilities structure
2548 *
2549 * Read the device capabilities and extract them into the dev_caps structure
2550 * for later use.
2551 */
2552 int
2553 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2554 {
2555 u32 cap_count = 0;
2556 void *cbuf;
2557 int status;
2558
2559 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2560 if (!cbuf)
2561 return -ENOMEM;
2562
2563 /* Although the driver doesn't know the number of capabilities the
2564 * device will return, we can simply send a 4KB buffer, the maximum
2565 * possible size that firmware can return.
2566 */
2567 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2568
2569 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2570 ice_aqc_opc_list_dev_caps, NULL);
2571 if (!status)
2572 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2573 kfree(cbuf);
2574
2575 return status;
2576 }
2577
2578 /**
2579 * ice_discover_func_caps - Read and extract function capabilities
2580 * @hw: pointer to the hardware structure
2581 * @func_caps: pointer to function capabilities structure
2582 *
2583 * Read the function capabilities and extract them into the func_caps structure
2584 * for later use.
2585 */
2586 static int
2587 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2588 {
2589 u32 cap_count = 0;
2590 void *cbuf;
2591 int status;
2592
2593 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2594 if (!cbuf)
2595 return -ENOMEM;
2596
2597 /* Although the driver doesn't know the number of capabilities the
2598 * device will return, we can simply send a 4KB buffer, the maximum
2599 * possible size that firmware can return.
2600 */
2601 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2602
2603 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2604 ice_aqc_opc_list_func_caps, NULL);
2605 if (!status)
2606 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2607 kfree(cbuf);
2608
2609 return status;
2610 }
2611
2612 /**
2613 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2614 * @hw: pointer to the hardware structure
2615 */
2616 void ice_set_safe_mode_caps(struct ice_hw *hw)
2617 {
2618 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2619 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2620 struct ice_hw_common_caps cached_caps;
2621 u32 num_funcs;
2622
2623 /* cache some func_caps values that should be restored after memset */
2624 cached_caps = func_caps->common_cap;
2625
2626 /* unset func capabilities */
2627 memset(func_caps, 0, sizeof(*func_caps));
2628
2629 #define ICE_RESTORE_FUNC_CAP(name) \
2630 func_caps->common_cap.name = cached_caps.name
2631
2632 /* restore cached values */
2633 ICE_RESTORE_FUNC_CAP(valid_functions);
2634 ICE_RESTORE_FUNC_CAP(txq_first_id);
2635 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2636 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2637 ICE_RESTORE_FUNC_CAP(max_mtu);
2638 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2639 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2640 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2641 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
2642
2643 /* one Tx and one Rx queue in safe mode */
2644 func_caps->common_cap.num_rxq = 1;
2645 func_caps->common_cap.num_txq = 1;
2646
2647 /* two MSIX vectors, one for traffic and one for misc causes */
2648 func_caps->common_cap.num_msix_vectors = 2;
2649 func_caps->guar_num_vsi = 1;
2650
2651 /* cache some dev_caps values that should be restored after memset */
2652 cached_caps = dev_caps->common_cap;
2653 num_funcs = dev_caps->num_funcs;
2654
2655 /* unset dev capabilities */
2656 memset(dev_caps, 0, sizeof(*dev_caps));
2657
2658 #define ICE_RESTORE_DEV_CAP(name) \
2659 dev_caps->common_cap.name = cached_caps.name
2660
2661 /* restore cached values */
2662 ICE_RESTORE_DEV_CAP(valid_functions);
2663 ICE_RESTORE_DEV_CAP(txq_first_id);
2664 ICE_RESTORE_DEV_CAP(rxq_first_id);
2665 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2666 ICE_RESTORE_DEV_CAP(max_mtu);
2667 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2668 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2669 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2670 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
2671 dev_caps->num_funcs = num_funcs;
2672
2673 /* one Tx and one Rx queue per function in safe mode */
2674 dev_caps->common_cap.num_rxq = num_funcs;
2675 dev_caps->common_cap.num_txq = num_funcs;
2676
2677 /* two MSIX vectors per function */
2678 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2679 }
2680
2681 /**
2682 * ice_get_caps - get info about the HW
2683 * @hw: pointer to the hardware structure
2684 */
2685 int ice_get_caps(struct ice_hw *hw)
2686 {
2687 int status;
2688
2689 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2690 if (status)
2691 return status;
2692
2693 return ice_discover_func_caps(hw, &hw->func_caps);
2694 }
2695
2696 /**
2697 * ice_aq_manage_mac_write - manage MAC address write command
2698 * @hw: pointer to the HW struct
2699 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2700 * @flags: flags to control write behavior
2701 * @cd: pointer to command details structure or NULL
2702 *
2703 * This function is used to write MAC address to the NVM (0x0108).
2704 */
2705 int
2706 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2707 struct ice_sq_cd *cd)
2708 {
2709 struct ice_aqc_manage_mac_write *cmd;
2710 struct ice_aq_desc desc;
2711
2712 cmd = &desc.params.mac_write;
2713 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2714
2715 cmd->flags = flags;
2716 ether_addr_copy(cmd->mac_addr, mac_addr);
2717
2718 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2719 }
2720
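/* Example (illustrative only, not part of this file): persist a locally
 * administered address using the LAA + wake-on-LAN update flag, the same
 * flag the core driver uses when a new MAC is set from the stack.
 */
static int ice_example_write_laa(struct ice_hw *hw, const u8 *mac)
{
	return ice_aq_manage_mac_write(hw, mac,
				       ICE_AQC_MAN_MAC_UPDATE_LAA_WOL, NULL);
}
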
2721 /**
2722 * ice_aq_clear_pxe_mode
2723 * @hw: pointer to the HW struct
2724 *
2725 * Tell the firmware that the driver is taking over from PXE (0x0110).
2726 */
2727 static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
2728 {
2729 struct ice_aq_desc desc;
2730
2731 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2732 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2733
2734 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2735 }
2736
2737 /**
2738 * ice_clear_pxe_mode - clear pxe operations mode
2739 * @hw: pointer to the HW struct
2740 *
2741 * Make sure all PXE mode settings are cleared, including things
2742 * like descriptor fetch/write-back mode.
2743 */
2744 void ice_clear_pxe_mode(struct ice_hw *hw)
2745 {
2746 if (ice_check_sq_alive(hw, &hw->adminq))
2747 ice_aq_clear_pxe_mode(hw);
2748 }
2749
2750 /**
2751 * ice_aq_set_port_params - set physical port parameters.
2752 * @pi: pointer to the port info struct
2753 * @double_vlan: if set double VLAN is enabled
2754 * @cd: pointer to command details structure or NULL
2755 *
2756 * Set Physical port parameters (0x0203)
2757 */
2758 int
2759 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
2760 struct ice_sq_cd *cd)
2761
2762 {
2763 struct ice_aqc_set_port_params *cmd;
2764 struct ice_hw *hw = pi->hw;
2765 struct ice_aq_desc desc;
2766 u16 cmd_flags = 0;
2767
2768 cmd = &desc.params.set_port_params;
2769
2770 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2771 if (double_vlan)
2772 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2773 cmd->cmd_flags = cpu_to_le16(cmd_flags);
2774
2775 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2776 }
2777
2778 /**
2779 * ice_get_link_speed_based_on_phy_type - returns link speed
2780 * @phy_type_low: lower part of phy_type
2781 * @phy_type_high: higher part of phy_type
2782 *
2783 * This helper function will convert an entry in PHY type structure
2784 * [phy_type_low, phy_type_high] to its corresponding link speed.
2785 * Note: In the structure of [phy_type_low, phy_type_high], there should
2786 * be one bit set, as this function will convert one PHY type to its
2787 * speed.
2788 * If no bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
2789 * If more than one bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
2790 */
2791 static u16
2792 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2793 {
2794 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2795 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2796
2797 switch (phy_type_low) {
2798 case ICE_PHY_TYPE_LOW_100BASE_TX:
2799 case ICE_PHY_TYPE_LOW_100M_SGMII:
2800 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2801 break;
2802 case ICE_PHY_TYPE_LOW_1000BASE_T:
2803 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2804 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2805 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2806 case ICE_PHY_TYPE_LOW_1G_SGMII:
2807 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2808 break;
2809 case ICE_PHY_TYPE_LOW_2500BASE_T:
2810 case ICE_PHY_TYPE_LOW_2500BASE_X:
2811 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2812 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2813 break;
2814 case ICE_PHY_TYPE_LOW_5GBASE_T:
2815 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2816 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2817 break;
2818 case ICE_PHY_TYPE_LOW_10GBASE_T:
2819 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2820 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2821 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2822 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2823 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2824 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2825 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2826 break;
2827 case ICE_PHY_TYPE_LOW_25GBASE_T:
2828 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2829 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2830 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2831 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2832 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2833 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2834 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2835 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2836 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2837 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2838 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2839 break;
2840 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2841 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2842 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2843 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2844 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2845 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2846 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2847 break;
2848 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2849 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2850 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2851 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2852 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2853 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2854 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2855 case ICE_PHY_TYPE_LOW_50G_AUI2:
2856 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2857 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2858 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2859 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2860 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2861 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2862 case ICE_PHY_TYPE_LOW_50G_AUI1:
2863 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2864 break;
2865 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2866 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2867 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2868 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2869 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2870 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2871 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2872 case ICE_PHY_TYPE_LOW_100G_AUI4:
2873 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2874 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2875 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2876 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2877 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2878 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2879 break;
2880 default:
2881 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2882 break;
2883 }
2884
2885 switch (phy_type_high) {
2886 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2887 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2888 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2889 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2890 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2891 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2892 break;
2893 default:
2894 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2895 break;
2896 }
2897
2898 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2899 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2900 return ICE_AQ_LINK_SPEED_UNKNOWN;
2901 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2902 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2903 return ICE_AQ_LINK_SPEED_UNKNOWN;
2904 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2905 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2906 return speed_phy_type_low;
2907 else
2908 return speed_phy_type_high;
2909 }
2910
2911 /**
2912 * ice_update_phy_type
2913 * @phy_type_low: pointer to the lower part of phy_type
2914 * @phy_type_high: pointer to the higher part of phy_type
2915 * @link_speeds_bitmap: targeted link speeds bitmap
2916 *
2917 * Note: For the link_speeds_bitmap format, see
2918 * [ice_aqc_get_link_status->link_speed]. The caller may pass in a
2919 * link_speeds_bitmap that includes multiple speeds.
2920 *
2921 * Each entry in this [phy_type_low, phy_type_high] structure
2922 * represents a certain link speed. This helper function will turn on bits
2923 * in [phy_type_low, phy_type_high] structure based on the value of
2924 * link_speeds_bitmap input parameter.
2925 */
2926 void
2927 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2928 u16 link_speeds_bitmap)
2929 {
2930 u64 pt_high;
2931 u64 pt_low;
2932 int index;
2933 u16 speed;
2934
2935 /* We first check with low part of phy_type */
2936 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2937 pt_low = BIT_ULL(index);
2938 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2939
2940 if (link_speeds_bitmap & speed)
2941 *phy_type_low |= BIT_ULL(index);
2942 }
2943
2944 /* We then check with high part of phy_type */
2945 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2946 pt_high = BIT_ULL(index);
2947 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2948
2949 if (link_speeds_bitmap & speed)
2950 *phy_type_high |= BIT_ULL(index);
2951 }
2952 }
2953
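/* Example (illustrative only, not part of this file): build a PHY type
 * mask restricted to 10G and 25G and place it into a set-PHY-config
 * request. The cfg argument is assumed to have been prepared via
 * ice_copy_phy_caps_to_cfg().
 */
static void ice_example_limit_speeds(struct ice_aqc_set_phy_cfg_data *cfg)
{
	u64 phy_low = 0, phy_high = 0;

	ice_update_phy_type(&phy_low, &phy_high,
			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);

	/* AQ descriptors carry little-endian values */
	cfg->phy_type_low = cpu_to_le64(phy_low);
	cfg->phy_type_high = cpu_to_le64(phy_high);
}
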
2954 /**
2955 * ice_aq_set_phy_cfg
2956 * @hw: pointer to the HW struct
2957 * @pi: port info structure of the interested logical port
2958 * @cfg: structure with PHY configuration data to be set
2959 * @cd: pointer to command details structure or NULL
2960 *
2961 * Set the various PHY configuration parameters supported on the Port.
2962 * One or more of the Set PHY config parameters may be ignored in an MFP
2963 * mode as the PF may not have the privilege to set some of the PHY Config
2964 * parameters. This status will be indicated by the command response (0x0601).
2965 */
2966 int
2967 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2968 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2969 {
2970 struct ice_aq_desc desc;
2971 int status;
2972
2973 if (!cfg)
2974 return -EINVAL;
2975
2976 /* Ensure that only valid bits of cfg->caps can be turned on. */
2977 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2978 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2979 cfg->caps);
2980
2981 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2982 }
2983
2984 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2985 desc.params.set_phy.lport_num = pi->lport;
2986 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2987
2988 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2989 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2990 (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2991 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2992 (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2993 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2994 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2995 cfg->low_power_ctrl_an);
2996 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2997 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2998 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2999 cfg->link_fec_opt);
3000
3001 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3002 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3003 status = 0;
3004
3005 if (!status)
3006 pi->phy.curr_user_phy_cfg = *cfg;
3007
3008 return status;
3009 }
3010
3011 /**
3012 * ice_update_link_info - update status of the HW network link
3013 * @pi: port info structure of the interested logical port
3014 */
3015 int ice_update_link_info(struct ice_port_info *pi)
3016 {
3017 struct ice_link_status *li;
3018 int status;
3019
3020 if (!pi)
3021 return -EINVAL;
3022
3023 li = &pi->phy.link_info;
3024
3025 status = ice_aq_get_link_info(pi, true, NULL, NULL);
3026 if (status)
3027 return status;
3028
3029 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3030 struct ice_aqc_get_phy_caps_data *pcaps;
3031 struct ice_hw *hw;
3032
3033 hw = pi->hw;
3034 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
3035 GFP_KERNEL);
3036 if (!pcaps)
3037 return -ENOMEM;
3038
3039 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3040 pcaps, NULL);
3041
3042 devm_kfree(ice_hw_to_dev(hw), pcaps);
3043 }
3044
3045 return status;
3046 }
3047
3048 /**
3049 * ice_cache_phy_user_req
3050 * @pi: port information structure
3051 * @cache_data: PHY logging data
3052 * @cache_mode: PHY logging mode
3053 *
3054 * Log the user request on (FC, FEC, SPEED) for later use.
3055 */
3056 static void
3057 ice_cache_phy_user_req(struct ice_port_info *pi,
3058 struct ice_phy_cache_mode_data cache_data,
3059 enum ice_phy_cache_mode cache_mode)
3060 {
3061 if (!pi)
3062 return;
3063
3064 switch (cache_mode) {
3065 case ICE_FC_MODE:
3066 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3067 break;
3068 case ICE_SPEED_MODE:
3069 pi->phy.curr_user_speed_req =
3070 cache_data.data.curr_user_speed_req;
3071 break;
3072 case ICE_FEC_MODE:
3073 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3074 break;
3075 default:
3076 break;
3077 }
3078 }
3079
3080 /**
3081 * ice_caps_to_fc_mode
3082 * @caps: PHY capabilities
3083 *
3084 * Convert PHY FC capabilities to ice FC mode
3085 */
3086 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3087 {
3088 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3089 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3090 return ICE_FC_FULL;
3091
3092 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3093 return ICE_FC_TX_PAUSE;
3094
3095 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3096 return ICE_FC_RX_PAUSE;
3097
3098 return ICE_FC_NONE;
3099 }
3100
3101 /**
3102 * ice_caps_to_fec_mode
3103 * @caps: PHY capabilities
3104 * @fec_options: Link FEC options
3105 *
3106 * Convert PHY FEC capabilities to ice FEC mode
3107 */
3108 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3109 {
3110 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3111 return ICE_FEC_AUTO;
3112
3113 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3114 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3115 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3116 ICE_AQC_PHY_FEC_25G_KR_REQ))
3117 return ICE_FEC_BASER;
3118
3119 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3120 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3121 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3122 return ICE_FEC_RS;
3123
3124 return ICE_FEC_NONE;
3125 }
3126
3127 /**
3128 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3129 * @pi: port information structure
3130 * @cfg: PHY configuration data to set FC mode
3131 * @req_mode: FC mode to configure
3132 */
3133 int
3134 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3135 enum ice_fc_mode req_mode)
3136 {
3137 struct ice_phy_cache_mode_data cache_data;
3138 u8 pause_mask = 0x0;
3139
3140 if (!pi || !cfg)
3141 return -EINVAL;
3142
3143 switch (req_mode) {
3144 case ICE_FC_FULL:
3145 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3146 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3147 break;
3148 case ICE_FC_RX_PAUSE:
3149 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3150 break;
3151 case ICE_FC_TX_PAUSE:
3152 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3153 break;
3154 default:
3155 break;
3156 }
3157
3158 /* clear the old pause settings */
3159 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3160 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3161
3162 /* set the new capabilities */
3163 cfg->caps |= pause_mask;
3164
3165 /* Cache user FC request */
3166 cache_data.data.curr_user_fc_req = req_mode;
3167 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3168
3169 return 0;
3170 }
3171
3172 /**
3173 * ice_set_fc
3174 * @pi: port information structure
3175 * @aq_failures: pointer to status code, specific to ice_set_fc routine
3176 * @ena_auto_link_update: enable automatic link update
3177 *
3178 * Set the requested flow control mode.
3179 */
3180 int
3181 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3182 {
3183 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3184 struct ice_aqc_get_phy_caps_data *pcaps;
3185 struct ice_hw *hw;
3186 int status;
3187
3188 if (!pi || !aq_failures)
3189 return -EINVAL;
3190
3191 *aq_failures = 0;
3192 hw = pi->hw;
3193
3194 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
3195 if (!pcaps)
3196 return -ENOMEM;
3197
3198 /* Get the current PHY config */
3199 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3200 pcaps, NULL);
3201 if (status) {
3202 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3203 goto out;
3204 }
3205
3206 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3207
3208 /* Configure the set PHY data */
3209 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3210 if (status)
3211 goto out;
3212
3213 /* If the capabilities have changed, then set the new config */
3214 if (cfg.caps != pcaps->caps) {
3215 int retry_count, retry_max = 10;
3216
3217 /* Auto restart link so settings take effect */
3218 if (ena_auto_link_update)
3219 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3220
3221 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3222 if (status) {
3223 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3224 goto out;
3225 }
3226
3227 /* Update the link info
3228 * It sometimes takes a really long time for link to
3229 * come back from the atomic reset. Thus, we wait a
3230 * little bit.
3231 */
3232 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3233 status = ice_update_link_info(pi);
3234
3235 if (!status)
3236 break;
3237
3238 mdelay(100);
3239 }
3240
3241 if (status)
3242 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3243 }
3244
3245 out:
3246 devm_kfree(ice_hw_to_dev(hw), pcaps);
3247 return status;
3248 }
3249
3250 /**
3251 * ice_phy_caps_equals_cfg
3252 * @phy_caps: PHY capabilities
3253 * @phy_cfg: PHY configuration
3254 *
3255 * Helper function to determine if PHY capabilities matches PHY
3256 * configuration
3257 */
3258 bool
3259 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3260 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3261 {
3262 u8 caps_mask, cfg_mask;
3263
3264 if (!phy_caps || !phy_cfg)
3265 return false;
3266
3267 /* These bits are not common between capabilities and configuration.
3268 * Do not use them to determine equality.
3269 */
3270 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3271 ICE_AQC_GET_PHY_EN_MOD_QUAL);
3272 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3273
3274 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3275 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3276 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3277 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3278 phy_caps->eee_cap != phy_cfg->eee_cap ||
3279 phy_caps->eeer_value != phy_cfg->eeer_value ||
3280 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3281 return false;
3282
3283 return true;
3284 }
3285
3286 /**
3287 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3288 * @pi: port information structure
3289 * @caps: PHY ability structure to copy data from
3290 * @cfg: PHY configuration structure to copy data to
3291 *
3292 * Helper function to copy AQC PHY get ability data to PHY set configuration
3293 * data structure
3294 */
3295 void
3296 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3297 struct ice_aqc_get_phy_caps_data *caps,
3298 struct ice_aqc_set_phy_cfg_data *cfg)
3299 {
3300 if (!pi || !caps || !cfg)
3301 return;
3302
3303 memset(cfg, 0, sizeof(*cfg));
3304 cfg->phy_type_low = caps->phy_type_low;
3305 cfg->phy_type_high = caps->phy_type_high;
3306 cfg->caps = caps->caps;
3307 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3308 cfg->eee_cap = caps->eee_cap;
3309 cfg->eeer_value = caps->eeer_value;
3310 cfg->link_fec_opt = caps->link_fec_options;
3311 cfg->module_compliance_enforcement =
3312 caps->module_compliance_enforcement;
3313 }
3314
3315 /**
3316 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3317 * @pi: port information structure
3318 * @cfg: PHY configuration data to set FEC mode
3319 * @fec: FEC mode to configure
3320 */
3321 int
3322 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3323 enum ice_fec_mode fec)
3324 {
3325 struct ice_aqc_get_phy_caps_data *pcaps;
3326 struct ice_hw *hw;
3327 int status;
3328
3329 if (!pi || !cfg)
3330 return -EINVAL;
3331
3332 hw = pi->hw;
3333
3334 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3335 if (!pcaps)
3336 return -ENOMEM;
3337
3338 status = ice_aq_get_phy_caps(pi, false,
3339 (ice_fw_supports_report_dflt_cfg(hw) ?
3340 ICE_AQC_REPORT_DFLT_CFG :
3341 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3342 if (status)
3343 goto out;
3344
3345 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
3346 cfg->link_fec_opt = pcaps->link_fec_options;
3347
3348 switch (fec) {
3349 case ICE_FEC_BASER:
3350 /* Clear the RS bits, AND in the BASE-R ability
3351 * bits, and OR in the request bits.
3352 */
3353 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3354 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3355 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3356 ICE_AQC_PHY_FEC_25G_KR_REQ;
3357 break;
3358 case ICE_FEC_RS:
3359 /* Clear the BASE-R bits, AND in the RS ability
3360 * bits, and OR in the request bits.
3361 */
3362 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3363 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3364 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3365 break;
3366 case ICE_FEC_NONE:
3367 /* Clear all FEC option bits. */
3368 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3369 break;
3370 case ICE_FEC_AUTO:
3371 /* AND auto FEC bit, and all caps bits. */
3372 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3373 cfg->link_fec_opt |= pcaps->link_fec_options;
3374 break;
3375 default:
3376 status = -EINVAL;
3377 break;
3378 }
3379
3380 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
3381 !ice_fw_supports_report_dflt_cfg(hw)) {
3382 struct ice_link_default_override_tlv tlv = { 0 };
3383
3384 status = ice_get_link_default_override(&tlv, pi);
3385 if (status)
3386 goto out;
3387
3388 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3389 (tlv.options & ICE_LINK_OVERRIDE_EN))
3390 cfg->link_fec_opt = tlv.fec_options;
3391 }
3392
3393 out:
3394 kfree(pcaps);
3395
3396 return status;
3397 }
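
/* Editorial sketch: the usual request-RS-FEC flow built from the helpers
 * above (get active caps, copy to a config, apply the FEC mode, set the
 * PHY config). "example_request_rs_fec" is hypothetical; error handling
 * is trimmed for brevity.
 */
static int example_request_rs_fec(struct ice_port_info *pi)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	int err;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				  pcaps, NULL);
	if (!err) {
		ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
		err = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
	}
	if (!err)
		err = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);

	kfree(pcaps);
	return err;
}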
3398
3399 /**
3400 * ice_get_link_status - get status of the HW network link
3401 * @pi: port information structure
3402 * @link_up: pointer to bool (true/false = linkup/linkdown)
3403 *
3404 * Variable link_up is true if the link is up, false if it is down.
3405 * The value of link_up is invalid if the return status is non-zero. As a
3406 * result of this call, link status reporting becomes enabled.
3407 */
3408 int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3409 {
3410 struct ice_phy_info *phy_info;
3411 int status = 0;
3412
3413 if (!pi || !link_up)
3414 return -EINVAL;
3415
3416 phy_info = &pi->phy;
3417
3418 if (phy_info->get_link_info) {
3419 status = ice_update_link_info(pi);
3420
3421 if (status)
3422 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3423 status);
3424 }
3425
3426 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3427
3428 return status;
3429 }
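
/* Editorial sketch: polling the link through ice_get_link_status(). The
 * helper name is hypothetical.
 */
static void example_report_link(struct ice_port_info *pi)
{
	bool link_up;

	if (!ice_get_link_status(pi, &link_up))
		ice_debug(pi->hw, ICE_DBG_LINK, "link is %s\n",
			  link_up ? "up" : "down");
}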
3430
3431 /**
3432 * ice_aq_set_link_restart_an
3433 * @pi: pointer to the port information structure
3434 * @ena_link: if true: enable link, if false: disable link
3435 * @cd: pointer to command details structure or NULL
3436 *
3437 * Sets up the link and restarts the Auto-Negotiation over the link.
3438 */
3439 int
3440 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3441 struct ice_sq_cd *cd)
3442 {
3443 struct ice_aqc_restart_an *cmd;
3444 struct ice_aq_desc desc;
3445
3446 cmd = &desc.params.restart_an;
3447
3448 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3449
3450 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3451 cmd->lport_num = pi->lport;
3452 if (ena_link)
3453 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3454 else
3455 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3456
3457 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3458 }
3459
3460 /**
3461 * ice_aq_set_event_mask
3462 * @hw: pointer to the HW struct
3463 * @port_num: port number of the physical function
3464 * @mask: event mask to be set
3465 * @cd: pointer to command details structure or NULL
3466 *
3467 * Set event mask (0x0613)
3468 */
3469 int
3470 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3471 struct ice_sq_cd *cd)
3472 {
3473 struct ice_aqc_set_event_mask *cmd;
3474 struct ice_aq_desc desc;
3475
3476 cmd = &desc.params.set_event_mask;
3477
3478 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3479
3480 cmd->lport_num = port_num;
3481
3482 cmd->event_mask = cpu_to_le16(mask);
3483 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3484 }
3485
3486 /**
3487 * ice_aq_set_mac_loopback
3488 * @hw: pointer to the HW struct
3489 * @ena_lpbk: Enable or Disable loopback
3490 * @cd: pointer to command details structure or NULL
3491 *
3492 * Enable/disable loopback on a given port
3493 */
3494 int
3495 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3496 {
3497 struct ice_aqc_set_mac_lb *cmd;
3498 struct ice_aq_desc desc;
3499
3500 cmd = &desc.params.set_mac_lb;
3501
3502 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3503 if (ena_lpbk)
3504 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3505
3506 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3507 }
3508
3509 /**
3510 * ice_aq_set_port_id_led
3511 * @pi: pointer to the port information
3512 * @is_orig_mode: is this LED set to original mode (by the net-list)
3513 * @cd: pointer to command details structure or NULL
3514 *
3515 * Set LED value for the given port (0x06e9)
3516 */
3517 int
3518 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3519 struct ice_sq_cd *cd)
3520 {
3521 struct ice_aqc_set_port_id_led *cmd;
3522 struct ice_hw *hw = pi->hw;
3523 struct ice_aq_desc desc;
3524
3525 cmd = &desc.params.set_port_id_led;
3526
3527 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3528
3529 if (is_orig_mode)
3530 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3531 else
3532 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3533
3534 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3535 }
3536
3537 /**
3538 * ice_aq_sff_eeprom
3539 * @hw: pointer to the HW struct
3540 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3541 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3542 * @mem_addr: I2C offset; lower 8 bits hold the address, upper 8 bits must be zero.
3543 * @page: QSFP page
3544 * @set_page: set or ignore the page
3545 * @data: pointer to data buffer to be read/written to the I2C device.
3546 * @length: 1-16 for read, 1 for write.
3547 * @write: false for read, true for write.
3548 * @cd: pointer to command details structure or NULL
3549 *
3550 * Read/Write SFF EEPROM (0x06EE)
3551 */
3552 int
3553 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3554 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3555 bool write, struct ice_sq_cd *cd)
3556 {
3557 struct ice_aqc_sff_eeprom *cmd;
3558 struct ice_aq_desc desc;
3559 int status;
3560
3561 if (!data || (mem_addr & 0xff00))
3562 return -EINVAL;
3563
3564 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3565 cmd = &desc.params.read_write_sff_param;
3566 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
3567 cmd->lport_num = (u8)(lport & 0xff);
3568 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3569 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3570 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3571 ((set_page <<
3572 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3573 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3574 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3575 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3576 if (write)
3577 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3578
3579 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3580 return status;
3581 }
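
/* Editorial sketch: reading the module identifier byte (offset 0) with
 * ice_aq_sff_eeprom(). Bus address 0xA0 and page 0 follow the SFF
 * conventions noted in the kernel-doc above; lport is passed as 0 here
 * with its valid bit clear. The helper name is hypothetical.
 */
static int example_read_module_id(struct ice_hw *hw, u8 *id)
{
	return ice_aq_sff_eeprom(hw, 0, 0xA0, 0x0, 0, 0, id, 1,
				 false, NULL);
}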
3582
3583 /**
3584 * __ice_aq_get_set_rss_lut
3585 * @hw: pointer to the hardware structure
3586 * @params: RSS LUT parameters
3587 * @set: set true to set the table, false to get the table
3588 *
3589 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3590 */
3591 static int
3592 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3593 {
3594 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3595 struct ice_aqc_get_set_rss_lut *cmd_resp;
3596 struct ice_aq_desc desc;
3597 int status;
3598 u8 *lut;
3599
3600 if (!params)
3601 return -EINVAL;
3602
3603 vsi_handle = params->vsi_handle;
3604 lut = params->lut;
3605
3606 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3607 return -EINVAL;
3608
3609 lut_size = params->lut_size;
3610 lut_type = params->lut_type;
3611 glob_lut_idx = params->global_lut_id;
3612 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3613
3614 cmd_resp = &desc.params.get_set_rss_lut;
3615
3616 if (set) {
3617 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3618 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3619 } else {
3620 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3621 }
3622
3623 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3624 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3625 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3626 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3627
3628 switch (lut_type) {
3629 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3630 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3631 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3632 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3633 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3634 break;
3635 default:
3636 status = -EINVAL;
3637 goto ice_aq_get_set_rss_lut_exit;
3638 }
3639
3640 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3641 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3642 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3643
3644 if (!set)
3645 goto ice_aq_get_set_rss_lut_send;
3646 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3647 if (!set)
3648 goto ice_aq_get_set_rss_lut_send;
3649 } else {
3650 goto ice_aq_get_set_rss_lut_send;
3651 }
3652
3653 /* LUT size is only valid for Global and PF table types */
3654 switch (lut_size) {
3655 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3656 break;
3657 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3658 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3659 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3660 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3661 break;
3662 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3663 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3664 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3665 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3666 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3667 break;
3668 }
3669 fallthrough;
3670 default:
3671 status = -EINVAL;
3672 goto ice_aq_get_set_rss_lut_exit;
3673 }
3674
3675 ice_aq_get_set_rss_lut_send:
3676 cmd_resp->flags = cpu_to_le16(flags);
3677 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3678
3679 ice_aq_get_set_rss_lut_exit:
3680 return status;
3681 }
3682
3683 /**
3684 * ice_aq_get_rss_lut
3685 * @hw: pointer to the hardware structure
3686 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
3687 *
3688 * get the RSS lookup table, PF or VSI type
3689 */
3690 int
3691 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
3692 {
3693 return __ice_aq_get_set_rss_lut(hw, get_params, false);
3694 }
3695
3696 /**
3697 * ice_aq_set_rss_lut
3698 * @hw: pointer to the hardware structure
3699 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
3700 *
3701 * set the RSS lookup table, PF or VSI type
3702 */
3703 int
3704 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
3705 {
3706 return __ice_aq_get_set_rss_lut(hw, set_params, true);
3707 }
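
/* Editorial sketch: programming a 512-entry PF LUT. In this AQ flow the
 * size constant doubles as the byte length of the buffer handed to
 * firmware, so lut must be at least ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512
 * bytes. "example_set_pf_lut" is a hypothetical helper.
 */
static int example_set_pf_lut(struct ice_hw *hw, u16 vsi_handle, u8 *lut)
{
	struct ice_aq_get_set_rss_lut_params params = {
		.vsi_handle = vsi_handle,
		.lut = lut,
		.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512,
		.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
		.global_lut_id = 0,
	};

	return ice_aq_set_rss_lut(hw, &params);
}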
3708
3709 /**
3710 * __ice_aq_get_set_rss_key
3711 * @hw: pointer to the HW struct
3712 * @vsi_id: VSI FW index
3713 * @key: pointer to key info struct
3714 * @set: set true to set the key, false to get the key
3715 *
3716 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3717 */
3718 static int
3719 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3720 struct ice_aqc_get_set_rss_keys *key, bool set)
3721 {
3722 struct ice_aqc_get_set_rss_key *cmd_resp;
3723 u16 key_size = sizeof(*key);
3724 struct ice_aq_desc desc;
3725
3726 cmd_resp = &desc.params.get_set_rss_key;
3727
3728 if (set) {
3729 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3730 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3731 } else {
3732 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3733 }
3734
3735 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3736 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3737 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3738 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3739
3740 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3741 }
3742
3743 /**
3744 * ice_aq_get_rss_key
3745 * @hw: pointer to the HW struct
3746 * @vsi_handle: software VSI handle
3747 * @key: pointer to key info struct
3748 *
3749 * get the RSS key per VSI
3750 */
3751 int
3752 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3753 struct ice_aqc_get_set_rss_keys *key)
3754 {
3755 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3756 return -EINVAL;
3757
3758 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3759 key, false);
3760 }
3761
3762 /**
3763 * ice_aq_set_rss_key
3764 * @hw: pointer to the HW struct
3765 * @vsi_handle: software VSI handle
3766 * @keys: pointer to key info struct
3767 *
3768 * set the RSS key per VSI
3769 */
3770 int
3771 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3772 struct ice_aqc_get_set_rss_keys *keys)
3773 {
3774 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3775 return -EINVAL;
3776
3777 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3778 keys, true);
3779 }
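
/* Editorial sketch: seeding a VSI with a random RSS key. The
 * standard_rss_key field name is assumed from the AQ key layout; the
 * struct is zeroed so the remaining key bytes are deterministic.
 */
static int example_randomize_rss_key(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_aqc_get_set_rss_keys keys = {};

	netdev_rss_key_fill(keys.standard_rss_key,
			    sizeof(keys.standard_rss_key));
	return ice_aq_set_rss_key(hw, vsi_handle, &keys);
}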
3780
3781 /**
3782 * ice_aq_add_lan_txq
3783 * @hw: pointer to the hardware structure
3784 * @num_qgrps: Number of added queue groups
3785 * @qg_list: list of queue groups to be added
3786 * @buf_size: size of buffer for indirect command
3787 * @cd: pointer to command details structure or NULL
3788 *
3789 * Add Tx LAN queue (0x0C30)
3790 *
3791 * NOTE:
3792 * Prior to calling add Tx LAN queue:
3793 * Initialize the following as part of the Tx queue context:
3794 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3795 * Cache profile and Packet shaper profile.
3796 *
3797 * After add Tx LAN queue AQ command is completed:
3798 * Interrupts should be associated with specific queues,
3799 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
3800 * flow.
3801 */
3802 static int
3803 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3804 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3805 struct ice_sq_cd *cd)
3806 {
3807 struct ice_aqc_add_tx_qgrp *list;
3808 struct ice_aqc_add_txqs *cmd;
3809 struct ice_aq_desc desc;
3810 u16 i, sum_size = 0;
3811
3812 cmd = &desc.params.add_txqs;
3813
3814 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3815
3816 if (!qg_list)
3817 return -EINVAL;
3818
3819 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3820 return -EINVAL;
3821
3822 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3823 sum_size += struct_size(list, txqs, list->num_txqs);
3824 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3825 list->num_txqs);
3826 }
3827
3828 if (buf_size != sum_size)
3829 return -EINVAL;
3830
3831 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3832
3833 cmd->num_qgrps = num_qgrps;
3834
3835 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3836 }
3837
3838 /**
3839 * ice_aq_dis_lan_txq
3840 * @hw: pointer to the hardware structure
3841 * @num_qgrps: number of groups in the list
3842 * @qg_list: the list of groups to disable
3843 * @buf_size: the total size of the qg_list buffer in bytes
3844 * @rst_src: if called due to reset, specifies the reset source
3845 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3846 * @cd: pointer to command details structure or NULL
3847 *
3848 * Disable LAN Tx queue (0x0C31)
3849 */
3850 static int
3851 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3852 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3853 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3854 struct ice_sq_cd *cd)
3855 {
3856 struct ice_aqc_dis_txq_item *item;
3857 struct ice_aqc_dis_txqs *cmd;
3858 struct ice_aq_desc desc;
3859 u16 i, sz = 0;
3860 int status;
3861
3862 cmd = &desc.params.dis_txqs;
3863 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3864
3865 /* qg_list can be NULL only in VM/VF reset flow */
3866 if (!qg_list && !rst_src)
3867 return -EINVAL;
3868
3869 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3870 return -EINVAL;
3871
3872 cmd->num_entries = num_qgrps;
3873
3874 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3875 ICE_AQC_Q_DIS_TIMEOUT_M);
3876
3877 switch (rst_src) {
3878 case ICE_VM_RESET:
3879 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3880 cmd->vmvf_and_timeout |=
3881 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3882 break;
3883 case ICE_VF_RESET:
3884 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3885 /* In this case, FW expects vmvf_num to be absolute VF ID */
3886 cmd->vmvf_and_timeout |=
3887 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
3888 ICE_AQC_Q_DIS_VMVF_NUM_M);
3889 break;
3890 case ICE_NO_RESET:
3891 default:
3892 break;
3893 }
3894
3895 /* flush pipe on time out */
3896 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3897 /* If no queue group info, we are in a reset flow. Issue the AQ */
3898 if (!qg_list)
3899 goto do_aq;
3900
3901 /* set RD bit to indicate that command buffer is provided by the driver
3902 * and it needs to be read by the firmware
3903 */
3904 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3905
3906 for (i = 0, item = qg_list; i < num_qgrps; i++) {
3907 u16 item_size = struct_size(item, q_id, item->num_qs);
3908
3909 /* If the num of queues is even, add 2 bytes of padding */
3910 if ((item->num_qs % 2) == 0)
3911 item_size += 2;
3912
3913 sz += item_size;
3914
3915 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3916 }
3917
3918 if (buf_size != sz)
3919 return -EINVAL;
3920
3921 do_aq:
3922 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3923 if (status) {
3924 if (!qg_list)
3925 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3926 vmvf_num, hw->adminq.sq_last_status);
3927 else
3928 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3929 le16_to_cpu(qg_list[0].q_id[0]),
3930 hw->adminq.sq_last_status);
3931 }
3932 return status;
3933 }
3934
3935 /**
3936 * ice_aq_add_rdma_qsets
3937 * @hw: pointer to the hardware structure
3938 * @num_qset_grps: Number of RDMA Qset groups
3939 * @qset_list: list of Qset groups to be added
3940 * @buf_size: size of buffer for indirect command
3941 * @cd: pointer to command details structure or NULL
3942 *
3943 * Add Tx RDMA Qsets (0x0C33)
3944 */
3945 static int
3946 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
3947 struct ice_aqc_add_rdma_qset_data *qset_list,
3948 u16 buf_size, struct ice_sq_cd *cd)
3949 {
3950 struct ice_aqc_add_rdma_qset_data *list;
3951 struct ice_aqc_add_rdma_qset *cmd;
3952 struct ice_aq_desc desc;
3953 u16 i, sum_size = 0;
3954
3955 cmd = &desc.params.add_rdma_qset;
3956
3957 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
3958
3959 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
3960 return -EINVAL;
3961
3962 for (i = 0, list = qset_list; i < num_qset_grps; i++) {
3963 u16 num_qsets = le16_to_cpu(list->num_qsets);
3964
3965 sum_size += struct_size(list, rdma_qsets, num_qsets);
3966 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
3967 num_qsets);
3968 }
3969
3970 if (buf_size != sum_size)
3971 return -EINVAL;
3972
3973 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3974
3975 cmd->num_qset_grps = num_qset_grps;
3976
3977 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
3978 }
3979
3980 /* End of FW Admin Queue command wrappers */
3981
3982 /**
3983 * ice_write_byte - write a byte to a packed context structure
3984 * @src_ctx: the context structure to read from
3985 * @dest_ctx: the context to be written to
3986 * @ce_info: a description of the struct to be filled
3987 */
3988 static void
3989 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3990 {
3991 u8 src_byte, dest_byte, mask;
3992 u8 *from, *dest;
3993 u16 shift_width;
3994
3995 /* copy from the next struct field */
3996 from = src_ctx + ce_info->offset;
3997
3998 /* prepare the bits and mask */
3999 shift_width = ce_info->lsb % 8;
4000 mask = (u8)(BIT(ce_info->width) - 1);
4001
4002 src_byte = *from;
4003 src_byte &= mask;
4004
4005 /* shift to correct alignment */
4006 mask <<= shift_width;
4007 src_byte <<= shift_width;
4008
4009 /* get the current bits from the target bit string */
4010 dest = dest_ctx + (ce_info->lsb / 8);
4011
4012 memcpy(&dest_byte, dest, sizeof(dest_byte));
4013
4014 dest_byte &= ~mask; /* get the bits not changing */
4015 dest_byte |= src_byte; /* add in the new bits */
4016
4017 /* put it all back */
4018 memcpy(dest, &dest_byte, sizeof(dest_byte));
4019 }
4020
4021 /**
4022 * ice_write_word - write a word to a packed context structure
4023 * @src_ctx: the context structure to read from
4024 * @dest_ctx: the context to be written to
4025 * @ce_info: a description of the struct to be filled
4026 */
4027 static void
4028 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4029 {
4030 u16 src_word, mask;
4031 __le16 dest_word;
4032 u8 *from, *dest;
4033 u16 shift_width;
4034
4035 /* copy from the next struct field */
4036 from = src_ctx + ce_info->offset;
4037
4038 /* prepare the bits and mask */
4039 shift_width = ce_info->lsb % 8;
4040 mask = BIT(ce_info->width) - 1;
4041
4042 /* don't swizzle the bits until after the mask because the mask bits
4043 * will be in a different bit position on big endian machines
4044 */
4045 src_word = *(u16 *)from;
4046 src_word &= mask;
4047
4048 /* shift to correct alignment */
4049 mask <<= shift_width;
4050 src_word <<= shift_width;
4051
4052 /* get the current bits from the target bit string */
4053 dest = dest_ctx + (ce_info->lsb / 8);
4054
4055 memcpy(&dest_word, dest, sizeof(dest_word));
4056
4057 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
4058 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
4059
4060 /* put it all back */
4061 memcpy(dest, &dest_word, sizeof(dest_word));
4062 }
4063
4064 /**
4065 * ice_write_dword - write a dword to a packed context structure
4066 * @src_ctx: the context structure to read from
4067 * @dest_ctx: the context to be written to
4068 * @ce_info: a description of the struct to be filled
4069 */
4070 static void
4071 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4072 {
4073 u32 src_dword, mask;
4074 __le32 dest_dword;
4075 u8 *from, *dest;
4076 u16 shift_width;
4077
4078 /* copy from the next struct field */
4079 from = src_ctx + ce_info->offset;
4080
4081 /* prepare the bits and mask */
4082 shift_width = ce_info->lsb % 8;
4083
4084 /* if the field width is exactly 32 on an x86 machine, then the shift
4085 * operation will not work because the SHL instruction's shift count is
4086 * masked to 5 bits, so the shift will do nothing
4087 */
4088 if (ce_info->width < 32)
4089 mask = BIT(ce_info->width) - 1;
4090 else
4091 mask = (u32)~0;
4092
4093 /* don't swizzle the bits until after the mask because the mask bits
4094 * will be in a different bit position on big endian machines
4095 */
4096 src_dword = *(u32 *)from;
4097 src_dword &= mask;
4098
4099 /* shift to correct alignment */
4100 mask <<= shift_width;
4101 src_dword <<= shift_width;
4102
4103 /* get the current bits from the target bit string */
4104 dest = dest_ctx + (ce_info->lsb / 8);
4105
4106 memcpy(&dest_dword, dest, sizeof(dest_dword));
4107
4108 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
4109 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
4110
4111 /* put it all back */
4112 memcpy(dest, &dest_dword, sizeof(dest_dword));
4113 }
4114
4115 /**
4116 * ice_write_qword - write a qword to a packed context structure
4117 * @src_ctx: the context structure to read from
4118 * @dest_ctx: the context to be written to
4119 * @ce_info: a description of the struct to be filled
4120 */
4121 static void
4122 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4123 {
4124 u64 src_qword, mask;
4125 __le64 dest_qword;
4126 u8 *from, *dest;
4127 u16 shift_width;
4128
4129 /* copy from the next struct field */
4130 from = src_ctx + ce_info->offset;
4131
4132 /* prepare the bits and mask */
4133 shift_width = ce_info->lsb % 8;
4134
4135 /* if the field width is exactly 64 on an x86 machine, then the shift
4136 * operation will not work because the SHL instruction's shift count is
4137 * masked to 6 bits, so the shift will do nothing
4138 */
4139 if (ce_info->width < 64)
4140 mask = BIT_ULL(ce_info->width) - 1;
4141 else
4142 mask = (u64)~0;
4143
4144 /* don't swizzle the bits until after the mask because the mask bits
4145 * will be in a different bit position on big endian machines
4146 */
4147 src_qword = *(u64 *)from;
4148 src_qword &= mask;
4149
4150 /* shift to correct alignment */
4151 mask <<= shift_width;
4152 src_qword <<= shift_width;
4153
4154 /* get the current bits from the target bit string */
4155 dest = dest_ctx + (ce_info->lsb / 8);
4156
4157 memcpy(&dest_qword, dest, sizeof(dest_qword));
4158
4159 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
4160 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
4161
4162 /* put it all back */
4163 memcpy(dest, &dest_qword, sizeof(dest_qword));
4164 }
4165
4166 /**
4167 * ice_set_ctx - set context bits in packed structure
4168 * @hw: pointer to the hardware structure
4169 * @src_ctx: pointer to a generic non-packed context structure
4170 * @dest_ctx: pointer to memory for the packed structure
4171 * @ce_info: a description of the structure to be transformed
4172 */
4173 int
4174 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4175 const struct ice_ctx_ele *ce_info)
4176 {
4177 int f;
4178
4179 for (f = 0; ce_info[f].width; f++) {
4180 /* We have to deal with each element of the FW response
4181 * using the correct size so that we are correct regardless
4182 * of the endianness of the machine.
4183 */
4184 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4185 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4186 f, ce_info[f].width, ce_info[f].size_of);
4187 continue;
4188 }
4189 switch (ce_info[f].size_of) {
4190 case sizeof(u8):
4191 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4192 break;
4193 case sizeof(u16):
4194 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4195 break;
4196 case sizeof(u32):
4197 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4198 break;
4199 case sizeof(u64):
4200 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4201 break;
4202 default:
4203 return -EINVAL;
4204 }
4205 }
4206
4207 return 0;
4208 }
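
/* Editorial sketch: a minimal layout table for ice_set_ctx(). The struct
 * and bit positions are hypothetical; the driver builds its real tables
 * with the ICE_CTX_STORE() macro. A zero width terminates the table walk
 * in ice_set_ctx() above.
 */
struct example_ctx {
	u16 head;	/* packed into bits 12:0 of the destination */
	u8 ena;		/* packed into bit 13 */
};

static const struct ice_ctx_ele example_ctx_info[] = {
	{ .offset = offsetof(struct example_ctx, head),
	  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
	{ .offset = offsetof(struct example_ctx, ena),
	  .size_of = sizeof(u8), .width = 1, .lsb = 13 },
	{ .width = 0 },	/* terminator */
};

/* Usage: ice_set_ctx(hw, (u8 *)&src, dest_buf, example_ctx_info); */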
4209
4210 /**
4211 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4212 * @hw: pointer to the HW struct
4213 * @vsi_handle: software VSI handle
4214 * @tc: TC number
4215 * @q_handle: software queue handle
4216 */
4217 struct ice_q_ctx *
4218 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4219 {
4220 struct ice_vsi_ctx *vsi;
4221 struct ice_q_ctx *q_ctx;
4222
4223 vsi = ice_get_vsi_ctx(hw, vsi_handle);
4224 if (!vsi)
4225 return NULL;
4226 if (q_handle >= vsi->num_lan_q_entries[tc])
4227 return NULL;
4228 if (!vsi->lan_q_ctx[tc])
4229 return NULL;
4230 q_ctx = vsi->lan_q_ctx[tc];
4231 return &q_ctx[q_handle];
4232 }
4233
4234 /**
4235 * ice_ena_vsi_txq
4236 * @pi: port information structure
4237 * @vsi_handle: software VSI handle
4238 * @tc: TC number
4239 * @q_handle: software queue handle
4240 * @num_qgrps: Number of added queue groups
4241 * @buf: list of queue groups to be added
4242 * @buf_size: size of buffer for indirect command
4243 * @cd: pointer to command details structure or NULL
4244 *
4245 * This function adds one LAN queue
4246 */
4247 int
4248 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4249 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4250 struct ice_sq_cd *cd)
4251 {
4252 struct ice_aqc_txsched_elem_data node = { 0 };
4253 struct ice_sched_node *parent;
4254 struct ice_q_ctx *q_ctx;
4255 struct ice_hw *hw;
4256 int status;
4257
4258 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4259 return -EIO;
4260
4261 if (num_qgrps > 1 || buf->num_txqs > 1)
4262 return -ENOSPC;
4263
4264 hw = pi->hw;
4265
4266 if (!ice_is_vsi_valid(hw, vsi_handle))
4267 return -EINVAL;
4268
4269 mutex_lock(&pi->sched_lock);
4270
4271 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4272 if (!q_ctx) {
4273 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4274 q_handle);
4275 status = -EINVAL;
4276 goto ena_txq_exit;
4277 }
4278
4279 /* find a parent node */
4280 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4281 ICE_SCHED_NODE_OWNER_LAN);
4282 if (!parent) {
4283 status = -EINVAL;
4284 goto ena_txq_exit;
4285 }
4286
4287 buf->parent_teid = parent->info.node_teid;
4288 node.parent_teid = parent->info.node_teid;
4289 /* Mark the values in the "generic" section as valid. The default
4290 * value in the "generic" section is zero. This means that:
4291 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4292 * - 0 priority among siblings, indicated by Bit 1-3.
4293 * - WFQ, indicated by Bit 4.
4294 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4295 * Bit 5-6.
4296 * - Bit 7 is reserved.
4297 * Without setting the generic section as valid in valid_sections, the
4298 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4299 */
4300 buf->txqs[0].info.valid_sections =
4301 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4302 ICE_AQC_ELEM_VALID_EIR;
4303 buf->txqs[0].info.generic = 0;
4304 buf->txqs[0].info.cir_bw.bw_profile_idx =
4305 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4306 buf->txqs[0].info.cir_bw.bw_alloc =
4307 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4308 buf->txqs[0].info.eir_bw.bw_profile_idx =
4309 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4310 buf->txqs[0].info.eir_bw.bw_alloc =
4311 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4312
4313 /* add the LAN queue */
4314 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4315 if (status) {
4316 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4317 le16_to_cpu(buf->txqs[0].txq_id),
4318 hw->adminq.sq_last_status);
4319 goto ena_txq_exit;
4320 }
4321
4322 node.node_teid = buf->txqs[0].q_teid;
4323 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4324 q_ctx->q_handle = q_handle;
4325 q_ctx->q_teid = le32_to_cpu(node.node_teid);
4326
4327 /* add a leaf node into scheduler tree queue layer */
4328 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4329 if (!status)
4330 status = ice_sched_replay_q_bw(pi, q_ctx);
4331
4332 ena_txq_exit:
4333 mutex_unlock(&pi->sched_lock);
4334 return status;
4335 }
4336
4337 /**
4338 * ice_dis_vsi_txq
4339 * @pi: port information structure
4340 * @vsi_handle: software VSI handle
4341 * @tc: TC number
4342 * @num_queues: number of queues
4343 * @q_handles: pointer to software queue handle array
4344 * @q_ids: pointer to the q_id array
4345 * @q_teids: pointer to queue node teids
4346 * @rst_src: if called due to reset, specifies the reset source
4347 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4348 * @cd: pointer to command details structure or NULL
4349 *
4350 * This function removes queues and their corresponding nodes in SW DB
4351 */
4352 int
4353 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4354 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4355 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4356 struct ice_sq_cd *cd)
4357 {
4358 struct ice_aqc_dis_txq_item *qg_list;
4359 struct ice_q_ctx *q_ctx;
4360 int status = -ENOENT;
4361 struct ice_hw *hw;
4362 u16 i, buf_size;
4363
4364 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4365 return -EIO;
4366
4367 hw = pi->hw;
4368
4369 if (!num_queues) {
4370 /* if the queues are already disabled but the disable queue command
4371 * still has to be sent to complete the VF reset, then call
4372 * ice_aq_dis_lan_txq without any queue information
4373 */
4374 if (rst_src)
4375 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4376 vmvf_num, NULL);
4377 return -EIO;
4378 }
4379
4380 buf_size = struct_size(qg_list, q_id, 1);
4381 qg_list = kzalloc(buf_size, GFP_KERNEL);
4382 if (!qg_list)
4383 return -ENOMEM;
4384
4385 mutex_lock(&pi->sched_lock);
4386
4387 for (i = 0; i < num_queues; i++) {
4388 struct ice_sched_node *node;
4389
4390 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4391 if (!node)
4392 continue;
4393 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4394 if (!q_ctx) {
4395 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4396 q_handles[i]);
4397 continue;
4398 }
4399 if (q_ctx->q_handle != q_handles[i]) {
4400 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4401 q_ctx->q_handle, q_handles[i]);
4402 continue;
4403 }
4404 qg_list->parent_teid = node->info.parent_teid;
4405 qg_list->num_qs = 1;
4406 qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
4407 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4408 vmvf_num, cd);
4409
4410 if (status)
4411 break;
4412 ice_free_sched_node(pi, node);
4413 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4414 }
4415 mutex_unlock(&pi->sched_lock);
4416 kfree(qg_list);
4417 return status;
4418 }
4419
4420 /**
4421 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4422 * @pi: port information structure
4423 * @vsi_handle: software VSI handle
4424 * @tc_bitmap: TC bitmap
4425 * @maxqs: max queues array per TC
4426 * @owner: LAN or RDMA
4427 *
4428 * This function adds/updates the VSI queues per TC.
4429 */
4430 static int
4431 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4432 u16 *maxqs, u8 owner)
4433 {
4434 int status = 0;
4435 u8 i;
4436
4437 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4438 return -EIO;
4439
4440 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4441 return -EINVAL;
4442
4443 mutex_lock(&pi->sched_lock);
4444
4445 ice_for_each_traffic_class(i) {
4446 /* configuration is possible only if TC node is present */
4447 if (!ice_sched_get_tc_node(pi, i))
4448 continue;
4449
4450 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4451 ice_is_tc_ena(tc_bitmap, i));
4452 if (status)
4453 break;
4454 }
4455
4456 mutex_unlock(&pi->sched_lock);
4457 return status;
4458 }
4459
4460 /**
4461 * ice_cfg_vsi_lan - configure VSI LAN queues
4462 * @pi: port information structure
4463 * @vsi_handle: software VSI handle
4464 * @tc_bitmap: TC bitmap
4465 * @max_lanqs: max LAN queues array per TC
4466 *
4467 * This function adds/updates the VSI LAN queues per TC.
4468 */
4469 int
4470 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4471 u16 *max_lanqs)
4472 {
4473 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4474 ICE_SCHED_NODE_OWNER_LAN);
4475 }
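
/* Editorial sketch: give a VSI num_txq LAN queues on TC 0 only. Bit 0 of
 * the TC bitmap enables TC 0; "example_cfg_lan_tc0" is hypothetical.
 */
static int example_cfg_lan_tc0(struct ice_port_info *pi, u16 vsi_handle,
			       u16 num_txq)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };

	max_lanqs[0] = num_txq;
	return ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
}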
4476
4477 /**
4478 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
4479 * @pi: port information structure
4480 * @vsi_handle: software VSI handle
4481 * @tc_bitmap: TC bitmap
4482 * @max_rdmaqs: max RDMA queues array per TC
4483 *
4484 * This function adds/updates the VSI RDMA queues per TC.
4485 */
4486 int
4487 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4488 u16 *max_rdmaqs)
4489 {
4490 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
4491 ICE_SCHED_NODE_OWNER_RDMA);
4492 }
4493
4494 /**
4495 * ice_ena_vsi_rdma_qset
4496 * @pi: port information structure
4497 * @vsi_handle: software VSI handle
4498 * @tc: TC number
4499 * @rdma_qset: pointer to RDMA Qset
4500 * @num_qsets: number of RDMA Qsets
4501 * @qset_teid: pointer to Qset node TEIDs
4502 *
4503 * This function adds RDMA Qset
4504 */
4505 int
4506 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
4507 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
4508 {
4509 struct ice_aqc_txsched_elem_data node = { 0 };
4510 struct ice_aqc_add_rdma_qset_data *buf;
4511 struct ice_sched_node *parent;
4512 struct ice_hw *hw;
4513 u16 i, buf_size;
4514 int ret;
4515
4516 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4517 return -EIO;
4518 hw = pi->hw;
4519
4520 if (!ice_is_vsi_valid(hw, vsi_handle))
4521 return -EINVAL;
4522
4523 buf_size = struct_size(buf, rdma_qsets, num_qsets);
4524 buf = kzalloc(buf_size, GFP_KERNEL);
4525 if (!buf)
4526 return -ENOMEM;
4527 mutex_lock(&pi->sched_lock);
4528
4529 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4530 ICE_SCHED_NODE_OWNER_RDMA);
4531 if (!parent) {
4532 ret = -EINVAL;
4533 goto rdma_error_exit;
4534 }
4535 buf->parent_teid = parent->info.node_teid;
4536 node.parent_teid = parent->info.node_teid;
4537
4538 buf->num_qsets = cpu_to_le16(num_qsets);
4539 for (i = 0; i < num_qsets; i++) {
4540 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
4541 buf->rdma_qsets[i].info.valid_sections =
4542 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4543 ICE_AQC_ELEM_VALID_EIR;
4544 buf->rdma_qsets[i].info.generic = 0;
4545 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
4546 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4547 buf->rdma_qsets[i].info.cir_bw.bw_alloc =
4548 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4549 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
4550 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4551 buf->rdma_qsets[i].info.eir_bw.bw_alloc =
4552 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4553 }
4554 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
4555 if (ret) {
4556 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
4557 goto rdma_error_exit;
4558 }
4559 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4560 for (i = 0; i < num_qsets; i++) {
4561 node.node_teid = buf->rdma_qsets[i].qset_teid;
4562 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
4563 &node);
4564 if (ret)
4565 break;
4566 qset_teid[i] = le32_to_cpu(node.node_teid);
4567 }
4568 rdma_error_exit:
4569 mutex_unlock(&pi->sched_lock);
4570 kfree(buf);
4571 return ret;
4572 }
4573
4574 /**
4575 * ice_dis_vsi_rdma_qset - free RDMA resources
4576 * @pi: port_info struct
4577 * @count: number of RDMA Qsets to free
4578 * @qset_teid: TEID of Qset node
4579 * @q_id: list of queue IDs being disabled
4580 */
4581 int
4582 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
4583 u16 *q_id)
4584 {
4585 struct ice_aqc_dis_txq_item *qg_list;
4586 struct ice_hw *hw;
4587 int status = 0;
4588 u16 qg_size;
4589 int i;
4590
4591 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4592 return -EIO;
4593
4594 hw = pi->hw;
4595
4596 qg_size = struct_size(qg_list, q_id, 1);
4597 qg_list = kzalloc(qg_size, GFP_KERNEL);
4598 if (!qg_list)
4599 return -ENOMEM;
4600
4601 mutex_lock(&pi->sched_lock);
4602
4603 for (i = 0; i < count; i++) {
4604 struct ice_sched_node *node;
4605
4606 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
4607 if (!node)
4608 continue;
4609
4610 qg_list->parent_teid = node->info.parent_teid;
4611 qg_list->num_qs = 1;
4612 qg_list->q_id[0] =
4613 cpu_to_le16(q_id[i] |
4614 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
4615
4616 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
4617 ICE_NO_RESET, 0, NULL);
4618 if (status)
4619 break;
4620
4621 ice_free_sched_node(pi, node);
4622 }
4623
4624 mutex_unlock(&pi->sched_lock);
4625 kfree(qg_list);
4626 return status;
4627 }
4628
4629 /**
4630 * ice_replay_pre_init - replay pre initialization
4631 * @hw: pointer to the HW struct
4632 *
4633 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4634 */
4635 static int ice_replay_pre_init(struct ice_hw *hw)
4636 {
4637 struct ice_switch_info *sw = hw->switch_info;
4638 u8 i;
4639
4640 /* Delete old entries from replay filter list head if there is any */
4641 ice_rm_all_sw_replay_rule_info(hw);
4642 /* At the start of replay, move entries into the replay_rules list;
4643 * this allows adding rule entries back to the filt_rules list,
4644 * which is the operational list.
4645 */
4646 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
4647 list_replace_init(&sw->recp_list[i].filt_rules,
4648 &sw->recp_list[i].filt_replay_rules);
4649 ice_sched_replay_agg_vsi_preinit(hw);
4650
4651 return 0;
4652 }
4653
4654 /**
4655 * ice_replay_vsi - replay VSI configuration
4656 * @hw: pointer to the HW struct
4657 * @vsi_handle: driver VSI handle
4658 *
4659 * Restore all VSI configuration after reset. It is required to call this
4660 * function with main VSI first.
4661 */
4662 int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4663 {
4664 int status;
4665
4666 if (!ice_is_vsi_valid(hw, vsi_handle))
4667 return -EINVAL;
4668
4669 /* Replay pre-initialization if there is any */
4670 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
4671 status = ice_replay_pre_init(hw);
4672 if (status)
4673 return status;
4674 }
4675 /* Replay per VSI all RSS configurations */
4676 status = ice_replay_rss_cfg(hw, vsi_handle);
4677 if (status)
4678 return status;
4679 /* Replay per VSI all filters */
4680 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
4681 if (!status)
4682 status = ice_replay_vsi_agg(hw, vsi_handle);
4683 return status;
4684 }
4685
4686 /**
4687 * ice_replay_post - post replay configuration cleanup
4688 * @hw: pointer to the HW struct
4689 *
4690 * Post replay cleanup.
4691 */
4692 void ice_replay_post(struct ice_hw *hw)
4693 {
4694 /* Delete old entries from replay filter list head */
4695 ice_rm_all_sw_replay_rule_info(hw);
4696 ice_sched_replay_agg(hw);
4697 }
4698
4699 /**
4700 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4701 * @hw: ptr to the hardware info
4702 * @reg: offset of 64 bit HW register to read from
4703 * @prev_stat_loaded: bool to specify if previous stats are loaded
4704 * @prev_stat: ptr to previous loaded stat value
4705 * @cur_stat: ptr to current stat value
4706 */
4707 void
4708 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4709 u64 *prev_stat, u64 *cur_stat)
4710 {
4711 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4712
4713 /* device stats are not reset at PFR, so they likely will not be zeroed
4714 * when the driver starts. Thus, save the value from the first read
4715 * without adding to the statistic value so that we report stats which
4716 * count up from zero.
4717 */
4718 if (!prev_stat_loaded) {
4719 *prev_stat = new_data;
4720 return;
4721 }
4722
4723 /* Calculate the difference between the new and old values, and then
4724 * add it to the software stat value.
4725 */
4726 if (new_data >= *prev_stat)
4727 *cur_stat += new_data - *prev_stat;
4728 else
4729 /* to manage the potential roll-over */
4730 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4731
4732 /* Update the previously stored value to prepare for next read */
4733 *prev_stat = new_data;
4734 }
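
/* Worked example for the rollover branch above (editorial): with
 * *prev_stat = 0xFFFFFFFFF0 and a new 40-bit reading of 0x10, the counter
 * wrapped, so the increment is (0x10 + 2^40) - 0xFFFFFFFFF0 = 0x20,
 * i.e. 32 counts rather than a huge negative delta.
 */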
4735
4736 /**
4737 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4738 * @hw: ptr to the hardware info
4739 * @reg: offset of HW register to read from
4740 * @prev_stat_loaded: bool to specify if previous stats are loaded
4741 * @prev_stat: ptr to previous loaded stat value
4742 * @cur_stat: ptr to current stat value
4743 */
4744 void
4745 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4746 u64 *prev_stat, u64 *cur_stat)
4747 {
4748 u32 new_data;
4749
4750 new_data = rd32(hw, reg);
4751
4752 /* device stats are not reset at PFR, so they likely will not be zeroed
4753 * when the driver starts. Thus, save the value from the first read
4754 * without adding to the statistic value so that we report stats which
4755 * count up from zero.
4756 */
4757 if (!prev_stat_loaded) {
4758 *prev_stat = new_data;
4759 return;
4760 }
4761
4762 /* Calculate the difference between the new and old values, and then
4763 * add it to the software stat value.
4764 */
4765 if (new_data >= *prev_stat)
4766 *cur_stat += new_data - *prev_stat;
4767 else
4768 /* to manage the potential roll-over */
4769 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4770
4771 /* Update the previously stored value to prepare for next read */
4772 *prev_stat = new_data;
4773 }
4774
4775 /**
4776 * ice_sched_query_elem - query element information from HW
4777 * @hw: pointer to the HW struct
4778 * @node_teid: node TEID to be queried
4779 * @buf: buffer to element information
4780 *
4781 * This function queries HW element information
4782 */
4783 int
4784 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4785 struct ice_aqc_txsched_elem_data *buf)
4786 {
4787 u16 buf_size, num_elem_ret = 0;
4788 int status;
4789
4790 buf_size = sizeof(*buf);
4791 memset(buf, 0, buf_size);
4792 buf->node_teid = cpu_to_le32(node_teid);
4793 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4794 NULL);
4795 if (status || num_elem_ret != 1)
4796 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4797 return status;
4798 }
4799
4800 /**
4801 * ice_aq_read_i2c
4802 * @hw: pointer to the hw struct
4803 * @topo_addr: topology address for a device to communicate with
4804 * @bus_addr: 7-bit I2C bus address
4805 * @addr: I2C memory address (I2C offset) with up to 16 bits
4806 * @params: I2C parameters: bit [7] - Repeated start,
4807 * bits [6:5] data offset size,
4808 * bit [4] - I2C address type,
4809 * bits [3:0] - data size to read (0-16 bytes)
4810 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
4811 * @cd: pointer to command details structure or NULL
4812 *
4813 * Read I2C (0x06E2)
4814 */
4815 int
4816 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
4817 u16 bus_addr, __le16 addr, u8 params, u8 *data,
4818 struct ice_sq_cd *cd)
4819 {
4820 struct ice_aq_desc desc = { 0 };
4821 struct ice_aqc_i2c *cmd;
4822 u8 data_size;
4823 int status;
4824
4825 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
4826 cmd = &desc.params.read_i2c;
4827
4828 if (!data)
4829 return -EINVAL;
4830
4831 data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
4832
4833 cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
4834 cmd->topo_addr = topo_addr;
4835 cmd->i2c_params = params;
4836 cmd->i2c_addr = addr;
4837
4838 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4839 if (!status) {
4840 struct ice_aqc_read_i2c_resp *resp;
4841 u8 i;
4842
4843 resp = &desc.params.read_i2c_resp;
4844 for (i = 0; i < data_size; i++) {
4845 *data = resp->i2c_data[i];
4846 data++;
4847 }
4848 }
4849
4850 return status;
4851 }
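
/* Editorial sketch: fetch two bytes over I2C from a device behind the
 * link topology. The params byte packs the read size into bits [3:0];
 * the remaining option bits are left clear for simplicity. The helper
 * name is hypothetical.
 */
static int example_read_i2c_word(struct ice_hw *hw,
				 struct ice_aqc_link_topo_addr topo,
				 u16 bus_addr, u8 *buf)
{
	return ice_aq_read_i2c(hw, topo, bus_addr, cpu_to_le16(0), 2,
			       buf, NULL);
}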
4852
4853 /**
4854 * ice_aq_set_driver_param - Set driver parameter to share via firmware
4855 * @hw: pointer to the HW struct
4856 * @idx: parameter index to set
4857 * @value: the value to set the parameter to
4858 * @cd: pointer to command details structure or NULL
4859 *
4860 * Set the value of one of the software defined parameters. All PFs connected
4861 * to this device can read the value using ice_aq_get_driver_param.
4862 *
4863 * Note that firmware provides no synchronization or locking, and will not
4864 * save the parameter value during a device reset. It is expected that
4865 * a single PF will write the parameter value, while all other PFs will only
4866 * read it.
4867 */
4868 int
4869 ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
4870 u32 value, struct ice_sq_cd *cd)
4871 {
4872 struct ice_aqc_driver_shared_params *cmd;
4873 struct ice_aq_desc desc;
4874
4875 if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
4876 return -EIO;
4877
4878 cmd = &desc.params.drv_shared_params;
4879
4880 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
4881
4882 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
4883 cmd->param_indx = idx;
4884 cmd->param_val = cpu_to_le32(value);
4885
4886 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4887 }
4888
4889 /**
4890 * ice_aq_get_driver_param - Get driver parameter shared via firmware
4891 * @hw: pointer to the HW struct
4892 * @idx: parameter index to set
4893 * @value: storage to return the shared parameter
4894 * @cd: pointer to command details structure or NULL
4895 *
4896 * Get the value of one of the software defined parameters.
4897 *
4898 * Note that firmware provides no synchronization or locking. It is expected
4899 * that only a single PF will write a given parameter.
4900 */
4901 int
4902 ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
4903 u32 *value, struct ice_sq_cd *cd)
4904 {
4905 struct ice_aqc_driver_shared_params *cmd;
4906 struct ice_aq_desc desc;
4907 int status;
4908
4909 if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
4910 return -EIO;
4911
4912 cmd = &desc.params.drv_shared_params;
4913
4914 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
4915
4916 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
4917 cmd->param_indx = idx;
4918
4919 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4920 if (status)
4921 return status;
4922
4923 *value = le32_to_cpu(cmd->param_val);
4924
4925 return 0;
4926 }
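
/* Editorial sketch: one PF publishes a value and reads it back. The index
 * is any valid enum ice_aqc_driver_params entry supplied by the caller;
 * remember that firmware neither locks nor persists the value.
 */
static int example_share_value(struct ice_hw *hw,
			       enum ice_aqc_driver_params idx, u32 val)
{
	u32 readback = 0;
	int err;

	err = ice_aq_set_driver_param(hw, idx, val, NULL);
	if (!err)
		err = ice_aq_get_driver_param(hw, idx, &readback, NULL);
	return err;
}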
4927
4928 /**
4929 * ice_aq_set_gpio
4930 * @hw: pointer to the hw struct
4931 * @gpio_ctrl_handle: GPIO controller node handle
4932 * @pin_idx: IO Number of the GPIO that needs to be set
4933 * @value: SW-provided IO value to set in the LSB
4934 * @cd: pointer to command details structure or NULL
4935 *
4936 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
4937 */
4938 int
4939 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
4940 struct ice_sq_cd *cd)
4941 {
4942 struct ice_aqc_gpio *cmd;
4943 struct ice_aq_desc desc;
4944
4945 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
4946 cmd = &desc.params.read_write_gpio;
4947 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
4948 cmd->gpio_num = pin_idx;
4949 cmd->gpio_val = value ? 1 : 0;
4950
4951 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4952 }
4953
4954 /**
4955 * ice_aq_get_gpio
4956 * @hw: pointer to the hw struct
4957 * @gpio_ctrl_handle: GPIO controller node handle
4958 * @pin_idx: IO number of the GPIO to read
4959 * @value: IO value read
4960 * @cd: pointer to command details structure or NULL
4961 *
4962 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
4963 * the topology
4964 */
4965 int
4966 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
4967 bool *value, struct ice_sq_cd *cd)
4968 {
4969 struct ice_aqc_gpio *cmd;
4970 struct ice_aq_desc desc;
4971 int status;
4972
4973 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
4974 cmd = &desc.params.read_write_gpio;
4975 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
4976 cmd->gpio_num = pin_idx;
4977
4978 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4979 if (status)
4980 return status;
4981
4982 *value = !!cmd->gpio_val;
4983 return 0;
4984 }
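
/* Editorial sketch: drive a topology GPIO high, then read the level back.
 * "example_set_and_check_gpio" is a hypothetical helper.
 */
static int example_set_and_check_gpio(struct ice_hw *hw, u16 handle, u8 pin)
{
	bool level = false;
	int err;

	err = ice_aq_set_gpio(hw, handle, pin, true, NULL);
	if (!err)
		err = ice_aq_get_gpio(hw, handle, pin, &level, NULL);
	if (!err && !level)
		err = -EIO;	/* pin did not read back high */
	return err;
}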
4985
4986 /**
4987 * ice_fw_supports_link_override
4988 * @hw: pointer to the hardware structure
4989 *
4990 * Checks if the firmware supports link override
4991 */
4992 bool ice_fw_supports_link_override(struct ice_hw *hw)
4993 {
4994 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4995 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4996 return true;
4997 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4998 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4999 return true;
5000 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
5001 return true;
5002 }
5003
5004 return false;
5005 }

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}
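
/* Illustrative only, not part of the driver: reading the override for a
 * port, gated on the firmware capability check above. The strict-mode test
 * assumes the ICE_LINK_OVERRIDE_STRICT_MODE options bit defined alongside
 * the other ICE_LINK_OVERRIDE_* masks.
 */
#if 0	/* example usage, compiled out */
static int ice_example_read_ldo(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv ldo = {};
	int err;

	if (!ice_fw_supports_link_override(pi->hw))
		return -EOPNOTSUPP;

	err = ice_get_link_default_override(&ldo, pi);
	if (err)
		return err;

	if (ldo.options & ICE_LINK_OVERRIDE_STRICT_MODE)
		ice_debug(pi->hw, ICE_DBG_INIT, "strict link mode set\n");

	return 0;
}
#endif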

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}
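
/* Illustrative only, not part of the driver: this predicate is typically
 * paired with ice_aq_get_phy_caps(), which fills the caps structure it
 * inspects. The ICE_AQC_REPORT_ACTIVE_CFG report mode is assumed here.
 */
#if 0	/* example usage, compiled out */
static bool ice_example_an_enabled(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	bool an = false;

	/* the caps data is large, so heap-allocate rather than use stack */
	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return false;

	if (!ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				 pcaps, NULL))
		an = ice_is_phy_caps_an_enabled(pcaps);

	kfree(pcaps);
	return an;
}
#endif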

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
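
/* Illustrative only, not part of the driver: the DCB code is the real
 * consumer of this wrapper; it packs the local MIB TLVs into a buffer and
 * sends them down. SET_LOCAL_MIB_TYPE_LOCAL_MIB is assumed to be the
 * local-MIB type value from the DCB headers.
 */
#if 0	/* example usage, compiled out */
static int ice_example_set_local_mib(struct ice_hw *hw, void *mib, u16 len)
{
	/* mib must already be packed in the 0x0A08 buffer layout
	 * (LLDP TLVs in the format the firmware expects)
	 */
	return ice_aq_set_lldp_mib(hw, SET_LOCAL_MIB_TYPE_LOCAL_MIB,
				   mib, len, NULL);
}
#endif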

/**
 * ice_fw_supports_lldp_fltr_ctrl - check if FW API version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
		return true;
	}
	return false;
}

/**
 * ice_lldp_fltr_add_remove - add or remove an LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: true to add a filter, false to remove one
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
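
/* Illustrative only, not part of the driver: gating the filter add on the
 * capability check above, as is done when the FW LLDP agent is stopped and
 * LLDP frames should instead reach the PF VSI.
 */
#if 0	/* example usage, compiled out */
static int ice_example_redirect_lldp(struct ice_hw *hw, u16 pf_vsi_num)
{
	if (!ice_fw_supports_lldp_fltr_ctrl(hw))
		return -EOPNOTSUPP;

	/* true: add the Rx filter directing LLDP frames to this VSI */
	return ice_lldp_fltr_add_remove(hw, pf_vsi_num, true);
}
#endif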

/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		return true;
	}
	return false;
}