1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2021 Intel Corporation. */
3
4 #include "i40e.h"
5 #include "i40e_type.h"
6 #include "i40e_adminq.h"
7 #include "i40e_prototype.h"
8 #include <linux/avf/virtchnl.h>
9
10 /**
11 * i40e_set_mac_type - Sets MAC type
12 * @hw: pointer to the HW structure
13 *
14 * This function sets the mac type of the adapter based on the
15 * vendor ID and device ID stored in the hw structure.
16 **/
17 i40e_status i40e_set_mac_type(struct i40e_hw *hw)
18 {
19 i40e_status status = 0;
20
21 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
22 switch (hw->device_id) {
23 case I40E_DEV_ID_SFP_XL710:
24 case I40E_DEV_ID_QEMU:
25 case I40E_DEV_ID_KX_B:
26 case I40E_DEV_ID_KX_C:
27 case I40E_DEV_ID_QSFP_A:
28 case I40E_DEV_ID_QSFP_B:
29 case I40E_DEV_ID_QSFP_C:
30 case I40E_DEV_ID_5G_BASE_T_BC:
31 case I40E_DEV_ID_10G_BASE_T:
32 case I40E_DEV_ID_10G_BASE_T4:
33 case I40E_DEV_ID_10G_BASE_T_BC:
34 case I40E_DEV_ID_10G_B:
35 case I40E_DEV_ID_10G_SFP:
36 case I40E_DEV_ID_20G_KR2:
37 case I40E_DEV_ID_20G_KR2_A:
38 case I40E_DEV_ID_25G_B:
39 case I40E_DEV_ID_25G_SFP28:
40 case I40E_DEV_ID_X710_N3000:
41 case I40E_DEV_ID_XXV710_N3000:
42 hw->mac.type = I40E_MAC_XL710;
43 break;
44 case I40E_DEV_ID_KX_X722:
45 case I40E_DEV_ID_QSFP_X722:
46 case I40E_DEV_ID_SFP_X722:
47 case I40E_DEV_ID_1G_BASE_T_X722:
48 case I40E_DEV_ID_10G_BASE_T_X722:
49 case I40E_DEV_ID_SFP_I_X722:
50 case I40E_DEV_ID_SFP_X722_A:
51 hw->mac.type = I40E_MAC_X722;
52 break;
53 default:
54 hw->mac.type = I40E_MAC_GENERIC;
55 break;
56 }
57 } else {
58 status = I40E_ERR_DEVICE_NOT_SUPPORTED;
59 }
60
61 hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
62 hw->mac.type, status);
63 return status;
64 }
65
66 /**
67 * i40e_aq_str - convert AQ err code to a string
68 * @hw: pointer to the HW structure
69 * @aq_err: the AQ error code to convert
70 **/
71 const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
72 {
73 switch (aq_err) {
74 case I40E_AQ_RC_OK:
75 return "OK";
76 case I40E_AQ_RC_EPERM:
77 return "I40E_AQ_RC_EPERM";
78 case I40E_AQ_RC_ENOENT:
79 return "I40E_AQ_RC_ENOENT";
80 case I40E_AQ_RC_ESRCH:
81 return "I40E_AQ_RC_ESRCH";
82 case I40E_AQ_RC_EINTR:
83 return "I40E_AQ_RC_EINTR";
84 case I40E_AQ_RC_EIO:
85 return "I40E_AQ_RC_EIO";
86 case I40E_AQ_RC_ENXIO:
87 return "I40E_AQ_RC_ENXIO";
88 case I40E_AQ_RC_E2BIG:
89 return "I40E_AQ_RC_E2BIG";
90 case I40E_AQ_RC_EAGAIN:
91 return "I40E_AQ_RC_EAGAIN";
92 case I40E_AQ_RC_ENOMEM:
93 return "I40E_AQ_RC_ENOMEM";
94 case I40E_AQ_RC_EACCES:
95 return "I40E_AQ_RC_EACCES";
96 case I40E_AQ_RC_EFAULT:
97 return "I40E_AQ_RC_EFAULT";
98 case I40E_AQ_RC_EBUSY:
99 return "I40E_AQ_RC_EBUSY";
100 case I40E_AQ_RC_EEXIST:
101 return "I40E_AQ_RC_EEXIST";
102 case I40E_AQ_RC_EINVAL:
103 return "I40E_AQ_RC_EINVAL";
104 case I40E_AQ_RC_ENOTTY:
105 return "I40E_AQ_RC_ENOTTY";
106 case I40E_AQ_RC_ENOSPC:
107 return "I40E_AQ_RC_ENOSPC";
108 case I40E_AQ_RC_ENOSYS:
109 return "I40E_AQ_RC_ENOSYS";
110 case I40E_AQ_RC_ERANGE:
111 return "I40E_AQ_RC_ERANGE";
112 case I40E_AQ_RC_EFLUSHED:
113 return "I40E_AQ_RC_EFLUSHED";
114 case I40E_AQ_RC_BAD_ADDR:
115 return "I40E_AQ_RC_BAD_ADDR";
116 case I40E_AQ_RC_EMODE:
117 return "I40E_AQ_RC_EMODE";
118 case I40E_AQ_RC_EFBIG:
119 return "I40E_AQ_RC_EFBIG";
120 }
121
122 snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
123 return hw->err_str;
124 }
125
126 /**
127 * i40e_stat_str - convert status err code to a string
128 * @hw: pointer to the HW structure
129 * @stat_err: the status error code to convert
130 **/
131 const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
132 {
133 switch (stat_err) {
134 case 0:
135 return "OK";
136 case I40E_ERR_NVM:
137 return "I40E_ERR_NVM";
138 case I40E_ERR_NVM_CHECKSUM:
139 return "I40E_ERR_NVM_CHECKSUM";
140 case I40E_ERR_PHY:
141 return "I40E_ERR_PHY";
142 case I40E_ERR_CONFIG:
143 return "I40E_ERR_CONFIG";
144 case I40E_ERR_PARAM:
145 return "I40E_ERR_PARAM";
146 case I40E_ERR_MAC_TYPE:
147 return "I40E_ERR_MAC_TYPE";
148 case I40E_ERR_UNKNOWN_PHY:
149 return "I40E_ERR_UNKNOWN_PHY";
150 case I40E_ERR_LINK_SETUP:
151 return "I40E_ERR_LINK_SETUP";
152 case I40E_ERR_ADAPTER_STOPPED:
153 return "I40E_ERR_ADAPTER_STOPPED";
154 case I40E_ERR_INVALID_MAC_ADDR:
155 return "I40E_ERR_INVALID_MAC_ADDR";
156 case I40E_ERR_DEVICE_NOT_SUPPORTED:
157 return "I40E_ERR_DEVICE_NOT_SUPPORTED";
158 case I40E_ERR_PRIMARY_REQUESTS_PENDING:
159 return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
160 case I40E_ERR_INVALID_LINK_SETTINGS:
161 return "I40E_ERR_INVALID_LINK_SETTINGS";
162 case I40E_ERR_AUTONEG_NOT_COMPLETE:
163 return "I40E_ERR_AUTONEG_NOT_COMPLETE";
164 case I40E_ERR_RESET_FAILED:
165 return "I40E_ERR_RESET_FAILED";
166 case I40E_ERR_SWFW_SYNC:
167 return "I40E_ERR_SWFW_SYNC";
168 case I40E_ERR_NO_AVAILABLE_VSI:
169 return "I40E_ERR_NO_AVAILABLE_VSI";
170 case I40E_ERR_NO_MEMORY:
171 return "I40E_ERR_NO_MEMORY";
172 case I40E_ERR_BAD_PTR:
173 return "I40E_ERR_BAD_PTR";
174 case I40E_ERR_RING_FULL:
175 return "I40E_ERR_RING_FULL";
176 case I40E_ERR_INVALID_PD_ID:
177 return "I40E_ERR_INVALID_PD_ID";
178 case I40E_ERR_INVALID_QP_ID:
179 return "I40E_ERR_INVALID_QP_ID";
180 case I40E_ERR_INVALID_CQ_ID:
181 return "I40E_ERR_INVALID_CQ_ID";
182 case I40E_ERR_INVALID_CEQ_ID:
183 return "I40E_ERR_INVALID_CEQ_ID";
184 case I40E_ERR_INVALID_AEQ_ID:
185 return "I40E_ERR_INVALID_AEQ_ID";
186 case I40E_ERR_INVALID_SIZE:
187 return "I40E_ERR_INVALID_SIZE";
188 case I40E_ERR_INVALID_ARP_INDEX:
189 return "I40E_ERR_INVALID_ARP_INDEX";
190 case I40E_ERR_INVALID_FPM_FUNC_ID:
191 return "I40E_ERR_INVALID_FPM_FUNC_ID";
192 case I40E_ERR_QP_INVALID_MSG_SIZE:
193 return "I40E_ERR_QP_INVALID_MSG_SIZE";
194 case I40E_ERR_QP_TOOMANY_WRS_POSTED:
195 return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
196 case I40E_ERR_INVALID_FRAG_COUNT:
197 return "I40E_ERR_INVALID_FRAG_COUNT";
198 case I40E_ERR_QUEUE_EMPTY:
199 return "I40E_ERR_QUEUE_EMPTY";
200 case I40E_ERR_INVALID_ALIGNMENT:
201 return "I40E_ERR_INVALID_ALIGNMENT";
202 case I40E_ERR_FLUSHED_QUEUE:
203 return "I40E_ERR_FLUSHED_QUEUE";
204 case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
205 return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
206 case I40E_ERR_INVALID_IMM_DATA_SIZE:
207 return "I40E_ERR_INVALID_IMM_DATA_SIZE";
208 case I40E_ERR_TIMEOUT:
209 return "I40E_ERR_TIMEOUT";
210 case I40E_ERR_OPCODE_MISMATCH:
211 return "I40E_ERR_OPCODE_MISMATCH";
212 case I40E_ERR_CQP_COMPL_ERROR:
213 return "I40E_ERR_CQP_COMPL_ERROR";
214 case I40E_ERR_INVALID_VF_ID:
215 return "I40E_ERR_INVALID_VF_ID";
216 case I40E_ERR_INVALID_HMCFN_ID:
217 return "I40E_ERR_INVALID_HMCFN_ID";
218 case I40E_ERR_BACKING_PAGE_ERROR:
219 return "I40E_ERR_BACKING_PAGE_ERROR";
220 case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
221 return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
222 case I40E_ERR_INVALID_PBLE_INDEX:
223 return "I40E_ERR_INVALID_PBLE_INDEX";
224 case I40E_ERR_INVALID_SD_INDEX:
225 return "I40E_ERR_INVALID_SD_INDEX";
226 case I40E_ERR_INVALID_PAGE_DESC_INDEX:
227 return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
228 case I40E_ERR_INVALID_SD_TYPE:
229 return "I40E_ERR_INVALID_SD_TYPE";
230 case I40E_ERR_MEMCPY_FAILED:
231 return "I40E_ERR_MEMCPY_FAILED";
232 case I40E_ERR_INVALID_HMC_OBJ_INDEX:
233 return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
234 case I40E_ERR_INVALID_HMC_OBJ_COUNT:
235 return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
236 case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
237 return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
238 case I40E_ERR_SRQ_ENABLED:
239 return "I40E_ERR_SRQ_ENABLED";
240 case I40E_ERR_ADMIN_QUEUE_ERROR:
241 return "I40E_ERR_ADMIN_QUEUE_ERROR";
242 case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
243 return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
244 case I40E_ERR_BUF_TOO_SHORT:
245 return "I40E_ERR_BUF_TOO_SHORT";
246 case I40E_ERR_ADMIN_QUEUE_FULL:
247 return "I40E_ERR_ADMIN_QUEUE_FULL";
248 case I40E_ERR_ADMIN_QUEUE_NO_WORK:
249 return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
250 case I40E_ERR_BAD_IWARP_CQE:
251 return "I40E_ERR_BAD_IWARP_CQE";
252 case I40E_ERR_NVM_BLANK_MODE:
253 return "I40E_ERR_NVM_BLANK_MODE";
254 case I40E_ERR_NOT_IMPLEMENTED:
255 return "I40E_ERR_NOT_IMPLEMENTED";
256 case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
257 return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
258 case I40E_ERR_DIAG_TEST_FAILED:
259 return "I40E_ERR_DIAG_TEST_FAILED";
260 case I40E_ERR_NOT_READY:
261 return "I40E_ERR_NOT_READY";
262 case I40E_NOT_SUPPORTED:
263 return "I40E_NOT_SUPPORTED";
264 case I40E_ERR_FIRMWARE_API_VERSION:
265 return "I40E_ERR_FIRMWARE_API_VERSION";
266 case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
267 return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
268 }
269
270 snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
271 return hw->err_str;
272 }
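/* Illustrative usage (not part of the driver): the two helpers above are
 * typically paired when logging an AdminQ failure, so both the driver status
 * and the firmware AQ return code are printed. A minimal sketch, assuming
 * "ret" holds the i40e_status returned by an AQ wrapper:
 *
 *	if (ret)
 *		hw_dbg(hw, "command failed, err %s aq_err %s\n",
 *		       i40e_stat_str(hw, ret),
 *		       i40e_aq_str(hw, hw->aq.asq_last_status));
 */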
273
274 /**
275 * i40e_debug_aq
276 * @hw: pointer to the hw struct
277 * @mask: debug mask
278 * @desc: pointer to admin queue descriptor
279 * @buffer: pointer to command buffer
280 * @buf_len: max length of buffer
281 *
282 * Dumps debug log about adminq command with descriptor contents.
283 **/
284 void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
285 void *buffer, u16 buf_len)
286 {
287 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
288 u32 effective_mask = hw->debug_mask & mask;
289 char prefix[27];
290 u16 len;
291 u8 *buf = (u8 *)buffer;
292
293 if (!effective_mask || !desc)
294 return;
295
296 len = le16_to_cpu(aq_desc->datalen);
297
298 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
299 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
300 le16_to_cpu(aq_desc->opcode),
301 le16_to_cpu(aq_desc->flags),
302 le16_to_cpu(aq_desc->datalen),
303 le16_to_cpu(aq_desc->retval));
304 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
305 "\tcookie (h,l) 0x%08X 0x%08X\n",
306 le32_to_cpu(aq_desc->cookie_high),
307 le32_to_cpu(aq_desc->cookie_low));
308 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
309 "\tparam (0,1) 0x%08X 0x%08X\n",
310 le32_to_cpu(aq_desc->params.internal.param0),
311 le32_to_cpu(aq_desc->params.internal.param1));
312 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
313 "\taddr (h,l) 0x%08X 0x%08X\n",
314 le32_to_cpu(aq_desc->params.external.addr_high),
315 le32_to_cpu(aq_desc->params.external.addr_low));
316
317 if (buffer && buf_len != 0 && len != 0 &&
318 (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
319 i40e_debug(hw, mask, "AQ CMD Buffer:\n");
320 if (buf_len < len)
321 len = buf_len;
322
323 snprintf(prefix, sizeof(prefix),
324 "i40e %02x:%02x.%x: \t0x",
325 hw->bus.bus_id,
326 hw->bus.device,
327 hw->bus.func);
328
329 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
330 16, 1, buf, len, false);
331 }
332 }
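/* Note (illustrative): the dump above is only emitted when the caller's
 * hw->debug_mask has the relevant AQ bits set, for example:
 *
 *	hw->debug_mask |= I40E_DEBUG_AQ_DESCRIPTOR | I40E_DEBUG_AQ_DESC_BUFFER;
 */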
333
334 /**
335 * i40e_check_asq_alive
336 * @hw: pointer to the hw struct
337 *
338 * Returns true if the Admin Send Queue is enabled, else false.
339 **/
340 bool i40e_check_asq_alive(struct i40e_hw *hw)
341 {
342 if (hw->aq.asq.len)
343 return !!(rd32(hw, hw->aq.asq.len) &
344 I40E_PF_ATQLEN_ATQENABLE_MASK);
345 else
346 return false;
347 }
348
349 /**
350 * i40e_aq_queue_shutdown
351 * @hw: pointer to the hw struct
352 * @unloading: is the driver unloading itself
353 *
354 * Tell the Firmware that we're shutting down the AdminQ and whether
355 * or not the driver is unloading as well.
356 **/
357 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
358 bool unloading)
359 {
360 struct i40e_aq_desc desc;
361 struct i40e_aqc_queue_shutdown *cmd =
362 (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
363 i40e_status status;
364
365 i40e_fill_default_direct_cmd_desc(&desc,
366 i40e_aqc_opc_queue_shutdown);
367
368 if (unloading)
369 cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
370 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
371
372 return status;
373 }
374
375 /**
376 * i40e_aq_get_set_rss_lut
377 * @hw: pointer to the hardware structure
378 * @vsi_id: vsi fw index
379 * @pf_lut: for PF table set true, for VSI table set false
380 * @lut: pointer to the lut buffer provided by the caller
381 * @lut_size: size of the lut buffer
382 * @set: set true to set the table, false to get the table
383 *
384 * Internal function to get or set the RSS lookup table
385 **/
386 static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
387 u16 vsi_id, bool pf_lut,
388 u8 *lut, u16 lut_size,
389 bool set)
390 {
391 i40e_status status;
392 struct i40e_aq_desc desc;
393 struct i40e_aqc_get_set_rss_lut *cmd_resp =
394 (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
395
396 if (set)
397 i40e_fill_default_direct_cmd_desc(&desc,
398 i40e_aqc_opc_set_rss_lut);
399 else
400 i40e_fill_default_direct_cmd_desc(&desc,
401 i40e_aqc_opc_get_rss_lut);
402
403 /* Indirect command */
404 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
405 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
406
407 cmd_resp->vsi_id =
408 cpu_to_le16((u16)((vsi_id <<
409 I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
410 I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
411 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
412
413 if (pf_lut)
414 cmd_resp->flags |= cpu_to_le16((u16)
415 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
416 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
417 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
418 else
419 cmd_resp->flags |= cpu_to_le16((u16)
420 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
421 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
422 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
423
424 status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
425
426 return status;
427 }
428
429 /**
430 * i40e_aq_get_rss_lut
431 * @hw: pointer to the hardware structure
432 * @vsi_id: vsi fw index
433 * @pf_lut: for PF table set true, for VSI table set false
434 * @lut: pointer to the lut buffer provided by the caller
435 * @lut_size: size of the lut buffer
436 *
437 * get the RSS lookup table, PF or VSI type
438 **/
439 i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
440 bool pf_lut, u8 *lut, u16 lut_size)
441 {
442 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
443 false);
444 }
445
446 /**
447 * i40e_aq_set_rss_lut
448 * @hw: pointer to the hardware structure
449 * @vsi_id: vsi fw index
450 * @pf_lut: for PF table set true, for VSI table set false
451 * @lut: pointer to the lut buffer provided by the caller
452 * @lut_size: size of the lut buffer
453 *
454 * set the RSS lookup table, PF or VSI type
455 **/
456 i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
457 bool pf_lut, u8 *lut, u16 lut_size)
458 {
459 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
460 }
461
462 /**
463 * i40e_aq_get_set_rss_key
464 * @hw: pointer to the hw struct
465 * @vsi_id: vsi fw index
466 * @key: pointer to key info struct
467 * @set: set true to set the key, false to get the key
468 *
469 * Internal function to get or set the RSS key per VSI
470 **/
471 static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
472 u16 vsi_id,
473 struct i40e_aqc_get_set_rss_key_data *key,
474 bool set)
475 {
476 i40e_status status;
477 struct i40e_aq_desc desc;
478 struct i40e_aqc_get_set_rss_key *cmd_resp =
479 (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
480 u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
481
482 if (set)
483 i40e_fill_default_direct_cmd_desc(&desc,
484 i40e_aqc_opc_set_rss_key);
485 else
486 i40e_fill_default_direct_cmd_desc(&desc,
487 i40e_aqc_opc_get_rss_key);
488
489 /* Indirect command */
490 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
491 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
492
493 cmd_resp->vsi_id =
494 cpu_to_le16((u16)((vsi_id <<
495 I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
496 I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
497 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
498
499 status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
500
501 return status;
502 }
503
504 /**
505 * i40e_aq_get_rss_key
506 * @hw: pointer to the hw struct
507 * @vsi_id: vsi fw index
508 * @key: pointer to key info struct
509 *
510 **/
511 i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
512 u16 vsi_id,
513 struct i40e_aqc_get_set_rss_key_data *key)
514 {
515 return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
516 }
517
518 /**
519 * i40e_aq_set_rss_key
520 * @hw: pointer to the hw struct
521 * @vsi_id: vsi fw index
522 * @key: pointer to key info struct
523 *
524 * set the RSS key per VSI
525 **/
526 i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
527 u16 vsi_id,
528 struct i40e_aqc_get_set_rss_key_data *key)
529 {
530 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
531 }
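/* Illustrative sketch (not part of the driver): a PF configuring RSS for a
 * VSI would typically program the hash key and then the VSI lookup table via
 * the wrappers above; "vsi_id", "seed", "lut" and "lut_size" are assumed to
 * be supplied by the caller:
 *
 *	struct i40e_aqc_get_set_rss_key_data key_data;
 *	i40e_status ret;
 *
 *	memcpy(&key_data, seed, sizeof(key_data));
 *	ret = i40e_aq_set_rss_key(hw, vsi_id, &key_data);
 *	if (!ret)
 *		ret = i40e_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
 */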
532
533 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
534 * hardware to a bit-field that can be used by SW to more easily determine the
535 * packet type.
536 *
537 * Macros are used to shorten the table lines and make this table human
538 * readable.
539 *
540 * We store the PTYPE in the top byte of the bit field - this is just so that
541 * we can check that the table doesn't have a row missing, as the index into
542 * the table should be the PTYPE.
543 *
544 * Typical work flow:
545 *
546 * IF NOT i40e_ptype_lookup[ptype].known
547 * THEN
548 * Packet is unknown
549 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
550 * Use the rest of the fields to look at the tunnels, inner protocols, etc
551 * ELSE
552 * Use the enum i40e_rx_l2_ptype to decode the packet type
553 * ENDIF
554 */
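/* A minimal C sketch of the decode flow described above (illustrative; the
 * field names follow struct i40e_rx_ptype_decoded, and "rx_ptype" is assumed
 * to be the 8-bit PTYPE taken from the Rx descriptor):
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[rx_ptype];
 *
 *	if (!decoded.known)
 *		return;		// unknown packet type
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
 *		// look at decoded.outer_ip_ver, decoded.tunnel_type,
 *		// decoded.inner_prot, decoded.payload_layer, ...
 *	} else {
 *		// decode via the L2 ptype (enum i40e_rx_l2_ptype)
 *	}
 */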
555
556 /* macro to make the table lines short, use explicit indexing with [PTYPE] */
557 #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
558 [PTYPE] = { \
559 1, \
560 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
561 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
562 I40E_RX_PTYPE_##OUTER_FRAG, \
563 I40E_RX_PTYPE_TUNNEL_##T, \
564 I40E_RX_PTYPE_TUNNEL_END_##TE, \
565 I40E_RX_PTYPE_##TEF, \
566 I40E_RX_PTYPE_INNER_PROT_##I, \
567 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
568
569 #define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
570
571 /* shorter macros make the table fit but are terse */
572 #define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
573 #define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
574 #define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
575
576 /* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
577 struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
578 /* L2 Packet types */
579 I40E_PTT_UNUSED_ENTRY(0),
580 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
581 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
582 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
583 I40E_PTT_UNUSED_ENTRY(4),
584 I40E_PTT_UNUSED_ENTRY(5),
585 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
586 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
587 I40E_PTT_UNUSED_ENTRY(8),
588 I40E_PTT_UNUSED_ENTRY(9),
589 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
590 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
591 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
592 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
593 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
594 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
595 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
596 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
597 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
598 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
599 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
600 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
601
602 /* Non Tunneled IPv4 */
603 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
604 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
605 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
606 I40E_PTT_UNUSED_ENTRY(25),
607 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
608 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
609 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
610
611 /* IPv4 --> IPv4 */
612 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
613 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
614 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
615 I40E_PTT_UNUSED_ENTRY(32),
616 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
617 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
618 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
619
620 /* IPv4 --> IPv6 */
621 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
622 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
623 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
624 I40E_PTT_UNUSED_ENTRY(39),
625 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
626 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
627 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
628
629 /* IPv4 --> GRE/NAT */
630 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
631
632 /* IPv4 --> GRE/NAT --> IPv4 */
633 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
634 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
635 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
636 I40E_PTT_UNUSED_ENTRY(47),
637 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
638 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
639 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
640
641 /* IPv4 --> GRE/NAT --> IPv6 */
642 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
643 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
644 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
645 I40E_PTT_UNUSED_ENTRY(54),
646 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
647 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
648 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
649
650 /* IPv4 --> GRE/NAT --> MAC */
651 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
652
653 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
654 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
655 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
656 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
657 I40E_PTT_UNUSED_ENTRY(62),
658 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
659 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
660 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
661
662 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
663 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
664 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
665 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
666 I40E_PTT_UNUSED_ENTRY(69),
667 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
668 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
669 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
670
671 /* IPv4 --> GRE/NAT --> MAC/VLAN */
672 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
673
674 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
675 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
676 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
677 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
678 I40E_PTT_UNUSED_ENTRY(77),
679 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
680 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
681 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
682
683 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
684 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
685 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
686 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
687 I40E_PTT_UNUSED_ENTRY(84),
688 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
689 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
690 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
691
692 /* Non Tunneled IPv6 */
693 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
694 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
695 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
696 I40E_PTT_UNUSED_ENTRY(91),
697 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
698 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
699 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
700
701 /* IPv6 --> IPv4 */
702 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
703 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
704 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
705 I40E_PTT_UNUSED_ENTRY(98),
706 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
707 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
708 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
709
710 /* IPv6 --> IPv6 */
711 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
712 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
713 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
714 I40E_PTT_UNUSED_ENTRY(105),
715 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
716 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
717 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
718
719 /* IPv6 --> GRE/NAT */
720 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
721
722 /* IPv6 --> GRE/NAT -> IPv4 */
723 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
724 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
725 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
726 I40E_PTT_UNUSED_ENTRY(113),
727 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
728 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
729 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
730
731 /* IPv6 --> GRE/NAT -> IPv6 */
732 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
733 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
734 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
735 I40E_PTT_UNUSED_ENTRY(120),
736 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
737 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
738 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
739
740 /* IPv6 --> GRE/NAT -> MAC */
741 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
742
743 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
744 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
745 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
746 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
747 I40E_PTT_UNUSED_ENTRY(128),
748 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
749 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
750 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
751
752 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
753 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
754 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
755 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
756 I40E_PTT_UNUSED_ENTRY(135),
757 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
758 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
759 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
760
761 /* IPv6 --> GRE/NAT -> MAC/VLAN */
762 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
763
764 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
765 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
766 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
767 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
768 I40E_PTT_UNUSED_ENTRY(143),
769 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
770 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
771 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
772
773 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
774 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
775 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
776 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
777 I40E_PTT_UNUSED_ENTRY(150),
778 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
779 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
780 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
781
782 /* unused entries */
783 [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
784 };
785
786 /**
787 * i40e_init_shared_code - Initialize the shared code
788 * @hw: pointer to hardware structure
789 *
790 * This assigns the MAC type and PHY code and inits the NVM.
791 * Does not touch the hardware. This function must be called prior to any
792 * other function in the shared code. The i40e_hw structure should be
793 * memset to 0 prior to calling this function. The following fields in
794 * hw structure should be filled in prior to calling this function:
795 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
796 * subsystem_vendor_id, and revision_id
797 **/
798 i40e_status i40e_init_shared_code(struct i40e_hw *hw)
799 {
800 i40e_status status = 0;
801 u32 port, ari, func_rid;
802
803 i40e_set_mac_type(hw);
804
805 switch (hw->mac.type) {
806 case I40E_MAC_XL710:
807 case I40E_MAC_X722:
808 break;
809 default:
810 return I40E_ERR_DEVICE_NOT_SUPPORTED;
811 }
812
813 hw->phy.get_link_info = true;
814
815 /* Determine port number and PF number */
816 port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
817 >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
818 hw->port = (u8)port;
819 ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
820 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
821 func_rid = rd32(hw, I40E_PF_FUNC_RID);
822 if (ari)
823 hw->pf_id = (u8)(func_rid & 0xff);
824 else
825 hw->pf_id = (u8)(func_rid & 0x7);
826
827 status = i40e_init_nvm(hw);
828 return status;
829 }
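/* Illustrative sketch of the init order described in the comment above (the
 * PCI identifiers are assumed to come from the probing driver's pci_dev, and
 * "pf" stands in for the caller's adapter structure):
 *
 *	struct i40e_hw *hw = &pf->hw;
 *	i40e_status err;
 *
 *	memset(hw, 0, sizeof(*hw));
 *	hw->vendor_id = pdev->vendor;
 *	hw->device_id = pdev->device;
 *	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 *	hw->subsystem_device_id = pdev->subsystem_device;
 *	hw->revision_id = pdev->revision;
 *	// hw->hw_addr and hw->back set up from the mapped BAR, omitted here
 *	err = i40e_init_shared_code(hw);
 *	if (err)
 *		dev_warn(&pdev->dev, "unsupported device, err %d\n", err);
 */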
830
831 /**
832 * i40e_aq_mac_address_read - Retrieve the MAC addresses
833 * @hw: pointer to the hw struct
834 * @flags: a return indicator of what addresses were added to the addr store
835 * @addrs: the requestor's mac addr store
836 * @cmd_details: pointer to command details structure or NULL
837 **/
838 static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
839 u16 *flags,
840 struct i40e_aqc_mac_address_read_data *addrs,
841 struct i40e_asq_cmd_details *cmd_details)
842 {
843 struct i40e_aq_desc desc;
844 struct i40e_aqc_mac_address_read *cmd_data =
845 (struct i40e_aqc_mac_address_read *)&desc.params.raw;
846 i40e_status status;
847
848 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
849 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
850
851 status = i40e_asq_send_command(hw, &desc, addrs,
852 sizeof(*addrs), cmd_details);
853 *flags = le16_to_cpu(cmd_data->command_flags);
854
855 return status;
856 }
857
858 /**
859 * i40e_aq_mac_address_write - Change the MAC addresses
860 * @hw: pointer to the hw struct
861 * @flags: indicates which MAC to be written
862 * @mac_addr: address to write
863 * @cmd_details: pointer to command details structure or NULL
864 **/
865 i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
866 u16 flags, u8 *mac_addr,
867 struct i40e_asq_cmd_details *cmd_details)
868 {
869 struct i40e_aq_desc desc;
870 struct i40e_aqc_mac_address_write *cmd_data =
871 (struct i40e_aqc_mac_address_write *)&desc.params.raw;
872 i40e_status status;
873
874 i40e_fill_default_direct_cmd_desc(&desc,
875 i40e_aqc_opc_mac_address_write);
876 cmd_data->command_flags = cpu_to_le16(flags);
877 cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
878 cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
879 ((u32)mac_addr[3] << 16) |
880 ((u32)mac_addr[4] << 8) |
881 mac_addr[5]);
882
883 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
884
885 return status;
886 }
887
888 /**
889 * i40e_get_mac_addr - get MAC address
890 * @hw: pointer to the HW structure
891 * @mac_addr: pointer to MAC address
892 *
893 * Reads the adapter's MAC address from register
894 **/
895 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
896 {
897 struct i40e_aqc_mac_address_read_data addrs;
898 i40e_status status;
899 u16 flags = 0;
900
901 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
902
903 if (flags & I40E_AQC_LAN_ADDR_VALID)
904 ether_addr_copy(mac_addr, addrs.pf_lan_mac);
905
906 return status;
907 }
908
909 /**
910 * i40e_get_port_mac_addr - get Port MAC address
911 * @hw: pointer to the HW structure
912 * @mac_addr: pointer to Port MAC address
913 *
914 * Reads the adapter's Port MAC address
915 **/
916 i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
917 {
918 struct i40e_aqc_mac_address_read_data addrs;
919 i40e_status status;
920 u16 flags = 0;
921
922 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
923 if (status)
924 return status;
925
926 if (flags & I40E_AQC_PORT_ADDR_VALID)
927 ether_addr_copy(mac_addr, addrs.port_mac);
928 else
929 status = I40E_ERR_INVALID_MAC_ADDR;
930
931 return status;
932 }
933
934 /**
935 * i40e_pre_tx_queue_cfg - pre tx queue configure
936 * @hw: pointer to the HW structure
937 * @queue: target PF queue index
938 * @enable: state change request
939 *
940 * Handles hw requirement to indicate intention to enable
941 * or disable target queue.
942 **/
943 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
944 {
945 u32 abs_queue_idx = hw->func_caps.base_queue + queue;
946 u32 reg_block = 0;
947 u32 reg_val;
948
949 if (abs_queue_idx >= 128) {
950 reg_block = abs_queue_idx / 128;
951 abs_queue_idx %= 128;
952 }
953
954 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
955 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
956 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
957
958 if (enable)
959 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
960 else
961 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
962
963 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
964 }
965
966 /**
967 * i40e_read_pba_string - Reads part number string from EEPROM
968 * @hw: pointer to hardware structure
969 * @pba_num: stores the part number string from the EEPROM
970 * @pba_num_size: part number string buffer length
971 *
972 * Reads the part number string from the EEPROM.
973 **/
974 i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
975 u32 pba_num_size)
976 {
977 i40e_status status = 0;
978 u16 pba_word = 0;
979 u16 pba_size = 0;
980 u16 pba_ptr = 0;
981 u16 i = 0;
982
983 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
984 if (status || (pba_word != 0xFAFA)) {
985 hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
986 return status;
987 }
988
989 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
990 if (status) {
991 hw_dbg(hw, "Failed to read PBA Block pointer.\n");
992 return status;
993 }
994
995 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
996 if (status) {
997 hw_dbg(hw, "Failed to read PBA Block size.\n");
998 return status;
999 }
1000
1001 /* Subtract one to get PBA word count (PBA Size word is included in
1002 * total size)
1003 */
1004 pba_size--;
1005 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
1006 hw_dbg(hw, "Buffer too small for PBA data.\n");
1007 return I40E_ERR_PARAM;
1008 }
1009
1010 for (i = 0; i < pba_size; i++) {
1011 status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
1012 if (status) {
1013 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
1014 return status;
1015 }
1016
1017 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
1018 pba_num[(i * 2) + 1] = pba_word & 0xFF;
1019 }
1020 pba_num[(pba_size * 2)] = '\0';
1021
1022 return status;
1023 }
1024
1025 /**
1026 * i40e_get_media_type - Gets media type
1027 * @hw: pointer to the hardware structure
1028 **/
1029 static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
1030 {
1031 enum i40e_media_type media;
1032
1033 switch (hw->phy.link_info.phy_type) {
1034 case I40E_PHY_TYPE_10GBASE_SR:
1035 case I40E_PHY_TYPE_10GBASE_LR:
1036 case I40E_PHY_TYPE_1000BASE_SX:
1037 case I40E_PHY_TYPE_1000BASE_LX:
1038 case I40E_PHY_TYPE_40GBASE_SR4:
1039 case I40E_PHY_TYPE_40GBASE_LR4:
1040 case I40E_PHY_TYPE_25GBASE_LR:
1041 case I40E_PHY_TYPE_25GBASE_SR:
1042 media = I40E_MEDIA_TYPE_FIBER;
1043 break;
1044 case I40E_PHY_TYPE_100BASE_TX:
1045 case I40E_PHY_TYPE_1000BASE_T:
1046 case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
1047 case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
1048 case I40E_PHY_TYPE_10GBASE_T:
1049 media = I40E_MEDIA_TYPE_BASET;
1050 break;
1051 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1052 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1053 case I40E_PHY_TYPE_10GBASE_CR1:
1054 case I40E_PHY_TYPE_40GBASE_CR4:
1055 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1056 case I40E_PHY_TYPE_40GBASE_AOC:
1057 case I40E_PHY_TYPE_10GBASE_AOC:
1058 case I40E_PHY_TYPE_25GBASE_CR:
1059 case I40E_PHY_TYPE_25GBASE_AOC:
1060 case I40E_PHY_TYPE_25GBASE_ACC:
1061 media = I40E_MEDIA_TYPE_DA;
1062 break;
1063 case I40E_PHY_TYPE_1000BASE_KX:
1064 case I40E_PHY_TYPE_10GBASE_KX4:
1065 case I40E_PHY_TYPE_10GBASE_KR:
1066 case I40E_PHY_TYPE_40GBASE_KR4:
1067 case I40E_PHY_TYPE_20GBASE_KR2:
1068 case I40E_PHY_TYPE_25GBASE_KR:
1069 media = I40E_MEDIA_TYPE_BACKPLANE;
1070 break;
1071 case I40E_PHY_TYPE_SGMII:
1072 case I40E_PHY_TYPE_XAUI:
1073 case I40E_PHY_TYPE_XFI:
1074 case I40E_PHY_TYPE_XLAUI:
1075 case I40E_PHY_TYPE_XLPPI:
1076 default:
1077 media = I40E_MEDIA_TYPE_UNKNOWN;
1078 break;
1079 }
1080
1081 return media;
1082 }
1083
1084 /**
1085 * i40e_poll_globr - Poll for Global Reset completion
1086 * @hw: pointer to the hardware structure
1087 * @retry_limit: how many times to retry before failure
1088 **/
1089 static i40e_status i40e_poll_globr(struct i40e_hw *hw,
1090 u32 retry_limit)
1091 {
1092 u32 cnt, reg = 0;
1093
1094 for (cnt = 0; cnt < retry_limit; cnt++) {
1095 reg = rd32(hw, I40E_GLGEN_RSTAT);
1096 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1097 return 0;
1098 msleep(100);
1099 }
1100
1101 hw_dbg(hw, "Global reset failed.\n");
1102 hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
1103
1104 return I40E_ERR_RESET_FAILED;
1105 }
1106
1107 #define I40E_PF_RESET_WAIT_COUNT_A0 200
1108 #define I40E_PF_RESET_WAIT_COUNT 200
1109 /**
1110 * i40e_pf_reset - Reset the PF
1111 * @hw: pointer to the hardware structure
1112 *
1113 * Assuming someone else has triggered a global reset,
1114 * assure the global reset is complete and then reset the PF
1115 **/
1116 i40e_status i40e_pf_reset(struct i40e_hw *hw)
1117 {
1118 u32 cnt = 0;
1119 u32 cnt1 = 0;
1120 u32 reg = 0;
1121 u32 grst_del;
1122
1123 /* Poll for Global Reset steady state in case of recent GRST.
1124 * The grst delay value is in 100ms units, and we'll wait a
1125 * couple of counts longer to be sure we don't just miss the end.
1126 */
1127 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
1128 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
1129 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
1130
1131 /* It can take up to 15 secs for GRST steady state.
1132 * Bump it to 16 secs max to be safe.
1133 */
1134 grst_del = grst_del * 20;
1135
1136 for (cnt = 0; cnt < grst_del; cnt++) {
1137 reg = rd32(hw, I40E_GLGEN_RSTAT);
1138 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1139 break;
1140 msleep(100);
1141 }
1142 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1143 hw_dbg(hw, "Global reset polling failed to complete.\n");
1144 return I40E_ERR_RESET_FAILED;
1145 }
1146
1147 /* Now Wait for the FW to be ready */
1148 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1149 reg = rd32(hw, I40E_GLNVM_ULD);
1150 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1151 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1152 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1153 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1154 hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1155 break;
1156 }
1157 usleep_range(10000, 20000);
1158 }
1159 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1160 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
1161 hw_dbg(hw, "wait for FW Reset complete timedout\n");
1162 hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1163 return I40E_ERR_RESET_FAILED;
1164 }
1165
1166 /* If there was a Global Reset in progress when we got here,
1167 * we don't need to do the PF Reset
1168 */
1169 if (!cnt) {
1170 u32 reg2 = 0;
1171 if (hw->revision_id == 0)
1172 cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1173 else
1174 cnt = I40E_PF_RESET_WAIT_COUNT;
1175 reg = rd32(hw, I40E_PFGEN_CTRL);
1176 wr32(hw, I40E_PFGEN_CTRL,
1177 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1178 for (; cnt; cnt--) {
1179 reg = rd32(hw, I40E_PFGEN_CTRL);
1180 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1181 break;
1182 reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1183 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1184 break;
1185 usleep_range(1000, 2000);
1186 }
1187 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1188 if (i40e_poll_globr(hw, grst_del))
1189 return I40E_ERR_RESET_FAILED;
1190 } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1191 hw_dbg(hw, "PF reset polling failed to complete.\n");
1192 return I40E_ERR_RESET_FAILED;
1193 }
1194 }
1195
1196 i40e_clear_pxe_mode(hw);
1197
1198 return 0;
1199 }
1200
1201 /**
1202 * i40e_clear_hw - clear out any left over hw state
1203 * @hw: pointer to the hw struct
1204 *
1205 * Clear queues and interrupts, typically called at init time,
1206 * but after the capabilities have been found so we know how many
1207 * queues and msix vectors have been allocated.
1208 **/
1209 void i40e_clear_hw(struct i40e_hw *hw)
1210 {
1211 u32 num_queues, base_queue;
1212 u32 num_pf_int;
1213 u32 num_vf_int;
1214 u32 num_vfs;
1215 u32 i, j;
1216 u32 val;
1217 u32 eol = 0x7ff;
1218
1219 /* get number of interrupts, queues, and VFs */
1220 val = rd32(hw, I40E_GLPCI_CNF2);
1221 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1222 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1223 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
1224 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
1225
1226 val = rd32(hw, I40E_PFLAN_QALLOC);
1227 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1228 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1229 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
1230 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
1231 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
1232 num_queues = (j - base_queue) + 1;
1233 else
1234 num_queues = 0;
1235
1236 val = rd32(hw, I40E_PF_VT_PFALLOC);
1237 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
1238 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
1239 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
1240 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
1241 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
1242 num_vfs = (j - i) + 1;
1243 else
1244 num_vfs = 0;
1245
1246 /* stop all the interrupts */
1247 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1248 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1249 for (i = 0; i < num_pf_int - 2; i++)
1250 wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1251
1252 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1253 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1254 wr32(hw, I40E_PFINT_LNKLST0, val);
1255 for (i = 0; i < num_pf_int - 2; i++)
1256 wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1257 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1258 for (i = 0; i < num_vfs; i++)
1259 wr32(hw, I40E_VPINT_LNKLST0(i), val);
1260 for (i = 0; i < num_vf_int - 2; i++)
1261 wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1262
1263 /* warn the HW of the coming Tx disables */
1264 for (i = 0; i < num_queues; i++) {
1265 u32 abs_queue_idx = base_queue + i;
1266 u32 reg_block = 0;
1267
1268 if (abs_queue_idx >= 128) {
1269 reg_block = abs_queue_idx / 128;
1270 abs_queue_idx %= 128;
1271 }
1272
1273 val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1274 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1275 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1276 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1277
1278 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1279 }
1280 udelay(400);
1281
1282 /* stop all the queues */
1283 for (i = 0; i < num_queues; i++) {
1284 wr32(hw, I40E_QINT_TQCTL(i), 0);
1285 wr32(hw, I40E_QTX_ENA(i), 0);
1286 wr32(hw, I40E_QINT_RQCTL(i), 0);
1287 wr32(hw, I40E_QRX_ENA(i), 0);
1288 }
1289
1290 /* short wait for all queue disables to settle */
1291 udelay(50);
1292 }
1293
1294 /**
1295 * i40e_clear_pxe_mode - clear pxe operations mode
1296 * @hw: pointer to the hw struct
1297 *
1298 * Make sure all PXE mode settings are cleared, including things
1299 * like descriptor fetch/write-back mode.
1300 **/
1301 void i40e_clear_pxe_mode(struct i40e_hw *hw)
1302 {
1303 u32 reg;
1304
1305 if (i40e_check_asq_alive(hw))
1306 i40e_aq_clear_pxe_mode(hw, NULL);
1307
1308 /* Clear single descriptor fetch/write-back mode */
1309 reg = rd32(hw, I40E_GLLAN_RCTL_0);
1310
1311 if (hw->revision_id == 0) {
1312 /* As a workaround, clear PXE_MODE instead of setting it */
1313 wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1314 } else {
1315 wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1316 }
1317 }
1318
1319 /**
1320 * i40e_led_is_mine - helper to find matching led
1321 * @hw: pointer to the hw struct
1322 * @idx: index into GPIO registers
1323 *
1324 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1325 */
1326 static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1327 {
1328 u32 gpio_val = 0;
1329 u32 port;
1330
1331 if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
1332 !hw->func_caps.led[idx])
1333 return 0;
1334 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1335 port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
1336 I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
1337
1338 /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
1339 * if it is not our port then ignore
1340 */
1341 if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1342 (port != hw->port))
1343 return 0;
1344
1345 return gpio_val;
1346 }
1347
1348 #define I40E_FW_LED BIT(4)
1349 #define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
1350 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
1351
1352 #define I40E_LED0 22
1353
1354 #define I40E_PIN_FUNC_SDP 0x0
1355 #define I40E_PIN_FUNC_LED 0x1
1356
1357 /**
1358 * i40e_led_get - return current on/off mode
1359 * @hw: pointer to the hw struct
1360 *
1361 * The value returned is the 'mode' field as defined in the
1362 * GPIO register definitions: 0x0 = off, 0xf = on, and other
1363 * values are variations of possible behaviors relating to
1364 * blink, link, and wire.
1365 **/
1366 u32 i40e_led_get(struct i40e_hw *hw)
1367 {
1368 u32 mode = 0;
1369 int i;
1370
1371 /* as per the documentation GPIO 22-29 are the LED
1372 * GPIO pins named LED0..LED7
1373 */
1374 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1375 u32 gpio_val = i40e_led_is_mine(hw, i);
1376
1377 if (!gpio_val)
1378 continue;
1379
1380 mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
1381 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
1382 break;
1383 }
1384
1385 return mode;
1386 }
1387
1388 /**
1389 * i40e_led_set - set new on/off mode
1390 * @hw: pointer to the hw struct
1391 * @mode: 0=off, 0xf=on (else see manual for mode details)
1392 * @blink: true if the LED should blink when on, false if steady
1393 *
1394 * If this function is used to turn on blinking, it should also be
1395 * used to disable blinking when restoring the original state.
1396 **/
1397 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1398 {
1399 int i;
1400
1401 if (mode & ~I40E_LED_MODE_VALID) {
1402 hw_dbg(hw, "invalid mode passed in %X\n", mode);
1403 return;
1404 }
1405
1406 /* as per the documentation GPIO 22-29 are the LED
1407 * GPIO pins named LED0..LED7
1408 */
1409 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1410 u32 gpio_val = i40e_led_is_mine(hw, i);
1411
1412 if (!gpio_val)
1413 continue;
1414
1415 if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
1416 u32 pin_func = 0;
1417
1418 if (mode & I40E_FW_LED)
1419 pin_func = I40E_PIN_FUNC_SDP;
1420 else
1421 pin_func = I40E_PIN_FUNC_LED;
1422
1423 gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
1424 gpio_val |= ((pin_func <<
1425 I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
1426 I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
1427 }
1428 gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1429 /* this & is a bit of paranoia, but serves as a range check */
1430 gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1431 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1432
1433 if (blink)
1434 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1435 else
1436 gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1437
1438 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1439 break;
1440 }
1441 }
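/* Illustrative sketch (not part of the driver): an "identify adapter" style
 * blink sequence built on the two helpers above saves the current mode,
 * forces the LED on with blink, and restores the original state afterwards:
 *
 *	u32 orig_mode = i40e_led_get(hw);
 *
 *	i40e_led_set(hw, 0xf, true);		// LED on, blinking
 *	// ... identification period ...
 *	i40e_led_set(hw, orig_mode, false);	// restore steady original mode
 */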
1442
1443 /* Admin command wrappers */
1444
1445 /**
1446 * i40e_aq_get_phy_capabilities
1447 * @hw: pointer to the hw struct
1448 * @abilities: structure for PHY capabilities to be filled
1449 * @qualified_modules: report Qualified Modules
1450 * @report_init: report init capabilities (active are default)
1451 * @cmd_details: pointer to command details structure or NULL
1452 *
1453 * Returns the various PHY abilities supported on the Port.
1454 **/
1455 i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1456 bool qualified_modules, bool report_init,
1457 struct i40e_aq_get_phy_abilities_resp *abilities,
1458 struct i40e_asq_cmd_details *cmd_details)
1459 {
1460 struct i40e_aq_desc desc;
1461 i40e_status status;
1462 u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1463 u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1464
1465 if (!abilities)
1466 return I40E_ERR_PARAM;
1467
1468 do {
1469 i40e_fill_default_direct_cmd_desc(&desc,
1470 i40e_aqc_opc_get_phy_abilities);
1471
1472 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1473 if (abilities_size > I40E_AQ_LARGE_BUF)
1474 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1475
1476 if (qualified_modules)
1477 desc.params.external.param0 |=
1478 cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1479
1480 if (report_init)
1481 desc.params.external.param0 |=
1482 cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1483
1484 status = i40e_asq_send_command(hw, &desc, abilities,
1485 abilities_size, cmd_details);
1486
1487 switch (hw->aq.asq_last_status) {
1488 case I40E_AQ_RC_EIO:
1489 status = I40E_ERR_UNKNOWN_PHY;
1490 break;
1491 case I40E_AQ_RC_EAGAIN:
1492 usleep_range(1000, 2000);
1493 total_delay++;
1494 status = I40E_ERR_TIMEOUT;
1495 break;
1496 /* also covers I40E_AQ_RC_OK */
1497 default:
1498 break;
1499 }
1500
1501 } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
1502 (total_delay < max_delay));
1503
1504 if (status)
1505 return status;
1506
1507 if (report_init) {
1508 if (hw->mac.type == I40E_MAC_XL710 &&
1509 hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1510 hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
1511 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1512 } else {
1513 hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1514 hw->phy.phy_types |=
1515 ((u64)abilities->phy_type_ext << 32);
1516 }
1517 }
1518
1519 return status;
1520 }
1521
1522 /**
1523 * i40e_aq_set_phy_config
1524 * @hw: pointer to the hw struct
1525 * @config: structure with PHY configuration to be set
1526 * @cmd_details: pointer to command details structure or NULL
1527 *
1528 * Set the various PHY configuration parameters
1529 * supported on the Port. One or more of the Set PHY config parameters may be
1530 * ignored in an MFP mode as the PF may not have the privilege to set some
1531 * of the PHY Config parameters. This status will be indicated by the
1532 * command response.
1533 **/
1534 enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
1535 struct i40e_aq_set_phy_config *config,
1536 struct i40e_asq_cmd_details *cmd_details)
1537 {
1538 struct i40e_aq_desc desc;
1539 struct i40e_aq_set_phy_config *cmd =
1540 (struct i40e_aq_set_phy_config *)&desc.params.raw;
1541 enum i40e_status_code status;
1542
1543 if (!config)
1544 return I40E_ERR_PARAM;
1545
1546 i40e_fill_default_direct_cmd_desc(&desc,
1547 i40e_aqc_opc_set_phy_config);
1548
1549 *cmd = *config;
1550
1551 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1552
1553 return status;
1554 }
1555
1556 static noinline_for_stack enum i40e_status_code
1557 i40e_set_fc_status(struct i40e_hw *hw,
1558 struct i40e_aq_get_phy_abilities_resp *abilities,
1559 bool atomic_restart)
1560 {
1561 struct i40e_aq_set_phy_config config;
1562 enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1563 u8 pause_mask = 0x0;
1564
1565 switch (fc_mode) {
1566 case I40E_FC_FULL:
1567 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1568 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1569 break;
1570 case I40E_FC_RX_PAUSE:
1571 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1572 break;
1573 case I40E_FC_TX_PAUSE:
1574 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1575 break;
1576 default:
1577 break;
1578 }
1579
1580 memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1581 /* clear the old pause settings */
1582 config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1583 ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1584 /* set the new abilities */
1585 config.abilities |= pause_mask;
1586 /* If the abilities have changed, then set the new config */
1587 if (config.abilities == abilities->abilities)
1588 return 0;
1589
1590 /* Auto restart link so settings take effect */
1591 if (atomic_restart)
1592 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1593 /* Copy over all the old settings */
1594 config.phy_type = abilities->phy_type;
1595 config.phy_type_ext = abilities->phy_type_ext;
1596 config.link_speed = abilities->link_speed;
1597 config.eee_capability = abilities->eee_capability;
1598 config.eeer = abilities->eeer_val;
1599 config.low_power_ctrl = abilities->d3_lpan;
1600 config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
1601 I40E_AQ_PHY_FEC_CONFIG_MASK;
1602
1603 return i40e_aq_set_phy_config(hw, &config, NULL);
1604 }
1605
1606 /**
1607 * i40e_set_fc
1608 * @hw: pointer to the hw struct
1609 * @aq_failures: buffer to return AdminQ failure information
1610 * @atomic_restart: whether to enable atomic link restart
1611 *
1612 * Set the requested flow control mode using set_phy_config.
1613 **/
1614 enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1615 bool atomic_restart)
1616 {
1617 struct i40e_aq_get_phy_abilities_resp abilities;
1618 enum i40e_status_code status;
1619
1620 *aq_failures = 0x0;
1621
1622 /* Get the current phy config */
1623 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1624 NULL);
1625 if (status) {
1626 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1627 return status;
1628 }
1629
1630 status = i40e_set_fc_status(hw, &abilities, atomic_restart);
1631 if (status)
1632 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1633
1634 /* Update the link info */
1635 status = i40e_update_link_info(hw);
1636 if (status) {
1637 /* Wait a little bit (on 40G cards it sometimes takes a really
1638 * long time for link to come back from the atomic reset)
1639 * and try once more
1640 */
1641 msleep(1000);
1642 status = i40e_update_link_info(hw);
1643 }
1644 if (status)
1645 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1646
1647 return status;
1648 }
1649
1650 /**
1651 * i40e_aq_clear_pxe_mode
1652 * @hw: pointer to the hw struct
1653 * @cmd_details: pointer to command details structure or NULL
1654 *
1655 * Tell the firmware that the driver is taking over from PXE
1656 **/
i40e_aq_clear_pxe_mode(struct i40e_hw * hw,struct i40e_asq_cmd_details * cmd_details)1657 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1658 struct i40e_asq_cmd_details *cmd_details)
1659 {
1660 i40e_status status;
1661 struct i40e_aq_desc desc;
1662 struct i40e_aqc_clear_pxe *cmd =
1663 (struct i40e_aqc_clear_pxe *)&desc.params.raw;
1664
1665 i40e_fill_default_direct_cmd_desc(&desc,
1666 i40e_aqc_opc_clear_pxe_mode);
1667
1668 cmd->rx_cnt = 0x2;
1669
1670 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1671
1672 wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1673
1674 return status;
1675 }
1676
1677 /**
1678 * i40e_aq_set_link_restart_an
1679 * @hw: pointer to the hw struct
1680 * @enable_link: if true: enable link, if false: disable link
1681 * @cmd_details: pointer to command details structure or NULL
1682 *
1683 * Sets up the link and restarts the Auto-Negotiation over the link.
1684 **/
i40e_aq_set_link_restart_an(struct i40e_hw * hw,bool enable_link,struct i40e_asq_cmd_details * cmd_details)1685 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1686 bool enable_link,
1687 struct i40e_asq_cmd_details *cmd_details)
1688 {
1689 struct i40e_aq_desc desc;
1690 struct i40e_aqc_set_link_restart_an *cmd =
1691 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1692 i40e_status status;
1693
1694 i40e_fill_default_direct_cmd_desc(&desc,
1695 i40e_aqc_opc_set_link_restart_an);
1696
1697 cmd->command = I40E_AQ_PHY_RESTART_AN;
1698 if (enable_link)
1699 cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1700 else
1701 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1702
1703 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1704
1705 return status;
1706 }
1707
1708 /**
1709 * i40e_aq_get_link_info
1710 * @hw: pointer to the hw struct
1711 * @enable_lse: enable/disable LinkStatusEvent reporting
1712 * @link: pointer to link status structure - optional
1713 * @cmd_details: pointer to command details structure or NULL
1714 *
1715 * Returns the link status of the adapter.
1716 **/
i40e_aq_get_link_info(struct i40e_hw * hw,bool enable_lse,struct i40e_link_status * link,struct i40e_asq_cmd_details * cmd_details)1717 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
1718 bool enable_lse, struct i40e_link_status *link,
1719 struct i40e_asq_cmd_details *cmd_details)
1720 {
1721 struct i40e_aq_desc desc;
1722 struct i40e_aqc_get_link_status *resp =
1723 (struct i40e_aqc_get_link_status *)&desc.params.raw;
1724 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1725 i40e_status status;
1726 bool tx_pause, rx_pause;
1727 u16 command_flags;
1728
1729 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1730
1731 if (enable_lse)
1732 command_flags = I40E_AQ_LSE_ENABLE;
1733 else
1734 command_flags = I40E_AQ_LSE_DISABLE;
1735 resp->command_flags = cpu_to_le16(command_flags);
1736
1737 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1738
1739 if (status)
1740 goto aq_get_link_info_exit;
1741
1742 /* save off old link status information */
1743 hw->phy.link_info_old = *hw_link_info;
1744
1745 /* update link status */
1746 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1747 hw->phy.media_type = i40e_get_media_type(hw);
1748 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1749 hw_link_info->link_info = resp->link_info;
1750 hw_link_info->an_info = resp->an_info;
1751 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1752 I40E_AQ_CONFIG_FEC_RS_ENA);
1753 hw_link_info->ext_info = resp->ext_info;
1754 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1755 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1756 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1757
1758 /* update fc info */
1759 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1760 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
1761 if (tx_pause & rx_pause)
1762 hw->fc.current_mode = I40E_FC_FULL;
1763 else if (tx_pause)
1764 hw->fc.current_mode = I40E_FC_TX_PAUSE;
1765 else if (rx_pause)
1766 hw->fc.current_mode = I40E_FC_RX_PAUSE;
1767 else
1768 hw->fc.current_mode = I40E_FC_NONE;
1769
1770 if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1771 hw_link_info->crc_enable = true;
1772 else
1773 hw_link_info->crc_enable = false;
1774
1775 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1776 hw_link_info->lse_enable = true;
1777 else
1778 hw_link_info->lse_enable = false;
1779
1780 if ((hw->mac.type == I40E_MAC_XL710) &&
1781 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
1782 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1783 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1784
1785 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
1786 hw->mac.type != I40E_MAC_X722) {
1787 __le32 tmp;
1788
1789 memcpy(&tmp, resp->link_type, sizeof(tmp));
1790 hw->phy.phy_types = le32_to_cpu(tmp);
1791 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1792 }
1793
1794 /* save link status information */
1795 if (link)
1796 *link = *hw_link_info;
1797
1798 /* flag cleared so helper functions don't call AQ again */
1799 hw->phy.get_link_info = false;
1800
1801 aq_get_link_info_exit:
1802 return status;
1803 }
1804
1805 /**
1806 * i40e_aq_set_phy_int_mask
1807 * @hw: pointer to the hw struct
1808 * @mask: interrupt mask to be set
1809 * @cmd_details: pointer to command details structure or NULL
1810 *
1811 * Set link interrupt mask.
1812 **/
i40e_aq_set_phy_int_mask(struct i40e_hw * hw,u16 mask,struct i40e_asq_cmd_details * cmd_details)1813 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1814 u16 mask,
1815 struct i40e_asq_cmd_details *cmd_details)
1816 {
1817 struct i40e_aq_desc desc;
1818 struct i40e_aqc_set_phy_int_mask *cmd =
1819 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1820 i40e_status status;
1821
1822 i40e_fill_default_direct_cmd_desc(&desc,
1823 i40e_aqc_opc_set_phy_int_mask);
1824
1825 cmd->event_mask = cpu_to_le16(mask);
1826
1827 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1828
1829 return status;
1830 }
1831
1832 /**
1833 * i40e_aq_set_phy_debug
1834 * @hw: pointer to the hw struct
1835 * @cmd_flags: debug command flags
1836 * @cmd_details: pointer to command details structure or NULL
1837 *
1838 * Reset the external PHY.
1839 **/
i40e_aq_set_phy_debug(struct i40e_hw * hw,u8 cmd_flags,struct i40e_asq_cmd_details * cmd_details)1840 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1841 struct i40e_asq_cmd_details *cmd_details)
1842 {
1843 struct i40e_aq_desc desc;
1844 struct i40e_aqc_set_phy_debug *cmd =
1845 (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1846 i40e_status status;
1847
1848 i40e_fill_default_direct_cmd_desc(&desc,
1849 i40e_aqc_opc_set_phy_debug);
1850
1851 cmd->command_flags = cmd_flags;
1852
1853 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1854
1855 return status;
1856 }
1857
1858 /**
1859 * i40e_is_aq_api_ver_ge
1860 * @aq: pointer to AdminQ info containing HW API version to compare
1861 * @maj: API major value
1862 * @min: API minor value
1863 *
1864 * Assert whether current HW API version is greater/equal than provided.
1865 **/
i40e_is_aq_api_ver_ge(struct i40e_adminq_info * aq,u16 maj,u16 min)1866 static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
1867 u16 min)
1868 {
1869 return (aq->api_maj_ver > maj ||
1870 (aq->api_maj_ver == maj && aq->api_min_ver >= min));
1871 }
1872
1873 /**
1874 * i40e_aq_add_vsi
1875 * @hw: pointer to the hw struct
1876 * @vsi_ctx: pointer to a vsi context struct
1877 * @cmd_details: pointer to command details structure or NULL
1878 *
1879 * Add a VSI context to the hardware.
1880 **/
i40e_aq_add_vsi(struct i40e_hw * hw,struct i40e_vsi_context * vsi_ctx,struct i40e_asq_cmd_details * cmd_details)1881 i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
1882 struct i40e_vsi_context *vsi_ctx,
1883 struct i40e_asq_cmd_details *cmd_details)
1884 {
1885 struct i40e_aq_desc desc;
1886 struct i40e_aqc_add_get_update_vsi *cmd =
1887 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1888 struct i40e_aqc_add_get_update_vsi_completion *resp =
1889 (struct i40e_aqc_add_get_update_vsi_completion *)
1890 &desc.params.raw;
1891 i40e_status status;
1892
1893 i40e_fill_default_direct_cmd_desc(&desc,
1894 i40e_aqc_opc_add_vsi);
1895
1896 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
1897 cmd->connection_type = vsi_ctx->connection_type;
1898 cmd->vf_id = vsi_ctx->vf_num;
1899 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1900
1901 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1902
1903 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
1904 sizeof(vsi_ctx->info),
1905 cmd_details, true);
1906
1907 if (status)
1908 goto aq_add_vsi_exit;
1909
1910 vsi_ctx->seid = le16_to_cpu(resp->seid);
1911 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
1912 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1913 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1914
1915 aq_add_vsi_exit:
1916 return status;
1917 }
1918
1919 /**
1920 * i40e_aq_set_default_vsi
1921 * @hw: pointer to the hw struct
1922 * @seid: vsi number
1923 * @cmd_details: pointer to command details structure or NULL
1924 **/
i40e_aq_set_default_vsi(struct i40e_hw * hw,u16 seid,struct i40e_asq_cmd_details * cmd_details)1925 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
1926 u16 seid,
1927 struct i40e_asq_cmd_details *cmd_details)
1928 {
1929 struct i40e_aq_desc desc;
1930 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1931 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1932 &desc.params.raw;
1933 i40e_status status;
1934
1935 i40e_fill_default_direct_cmd_desc(&desc,
1936 i40e_aqc_opc_set_vsi_promiscuous_modes);
1937
1938 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1939 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1940 cmd->seid = cpu_to_le16(seid);
1941
1942 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1943
1944 return status;
1945 }
1946
1947 /**
1948 * i40e_aq_clear_default_vsi
1949 * @hw: pointer to the hw struct
1950 * @seid: vsi number
1951 * @cmd_details: pointer to command details structure or NULL
1952 **/
i40e_aq_clear_default_vsi(struct i40e_hw * hw,u16 seid,struct i40e_asq_cmd_details * cmd_details)1953 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
1954 u16 seid,
1955 struct i40e_asq_cmd_details *cmd_details)
1956 {
1957 struct i40e_aq_desc desc;
1958 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1959 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1960 &desc.params.raw;
1961 i40e_status status;
1962
1963 i40e_fill_default_direct_cmd_desc(&desc,
1964 i40e_aqc_opc_set_vsi_promiscuous_modes);
1965
1966 cmd->promiscuous_flags = cpu_to_le16(0);
1967 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1968 cmd->seid = cpu_to_le16(seid);
1969
1970 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1971
1972 return status;
1973 }
1974
1975 /**
1976 * i40e_aq_set_vsi_unicast_promiscuous
1977 * @hw: pointer to the hw struct
1978 * @seid: vsi number
1979 * @set: set unicast promiscuous enable/disable
1980 * @cmd_details: pointer to command details structure or NULL
1981 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
1982 **/
i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw * hw,u16 seid,bool set,struct i40e_asq_cmd_details * cmd_details,bool rx_only_promisc)1983 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
1984 u16 seid, bool set,
1985 struct i40e_asq_cmd_details *cmd_details,
1986 bool rx_only_promisc)
1987 {
1988 struct i40e_aq_desc desc;
1989 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1990 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1991 i40e_status status;
1992 u16 flags = 0;
1993
1994 i40e_fill_default_direct_cmd_desc(&desc,
1995 i40e_aqc_opc_set_vsi_promiscuous_modes);
1996
1997 if (set) {
1998 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1999 if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2000 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
2001 }
2002
2003 cmd->promiscuous_flags = cpu_to_le16(flags);
2004
2005 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2006 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2007 cmd->valid_flags |=
2008 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
2009
2010 cmd->seid = cpu_to_le16(seid);
2011 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2012
2013 return status;
2014 }
2015
2016 /**
2017 * i40e_aq_set_vsi_multicast_promiscuous
2018 * @hw: pointer to the hw struct
2019 * @seid: vsi number
2020 * @set: set multicast promiscuous enable/disable
2021 * @cmd_details: pointer to command details structure or NULL
2022 **/
i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw * hw,u16 seid,bool set,struct i40e_asq_cmd_details * cmd_details)2023 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
2024 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
2025 {
2026 struct i40e_aq_desc desc;
2027 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2028 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2029 i40e_status status;
2030 u16 flags = 0;
2031
2032 i40e_fill_default_direct_cmd_desc(&desc,
2033 i40e_aqc_opc_set_vsi_promiscuous_modes);
2034
2035 if (set)
2036 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2037
2038 cmd->promiscuous_flags = cpu_to_le16(flags);
2039
2040 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2041
2042 cmd->seid = cpu_to_le16(seid);
2043 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2044
2045 return status;
2046 }
2047
2048 /**
2049 * i40e_aq_set_vsi_mc_promisc_on_vlan
2050 * @hw: pointer to the hw struct
2051 * @seid: vsi number
2052 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2053 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
2054 * @cmd_details: pointer to command details structure or NULL
2055 **/
i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw * hw,u16 seid,bool enable,u16 vid,struct i40e_asq_cmd_details * cmd_details)2056 enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
2057 u16 seid, bool enable,
2058 u16 vid,
2059 struct i40e_asq_cmd_details *cmd_details)
2060 {
2061 struct i40e_aq_desc desc;
2062 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2063 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2064 enum i40e_status_code status;
2065 u16 flags = 0;
2066
2067 i40e_fill_default_direct_cmd_desc(&desc,
2068 i40e_aqc_opc_set_vsi_promiscuous_modes);
2069
2070 if (enable)
2071 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2072
2073 cmd->promiscuous_flags = cpu_to_le16(flags);
2074 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2075 cmd->seid = cpu_to_le16(seid);
2076 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2077
2078 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
2079 cmd_details, true);
2080
2081 return status;
2082 }
2083
2084 /**
2085 * i40e_aq_set_vsi_uc_promisc_on_vlan
2086 * @hw: pointer to the hw struct
2087 * @seid: vsi number
2088 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2089 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
2090 * @cmd_details: pointer to command details structure or NULL
2091 **/
i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw * hw,u16 seid,bool enable,u16 vid,struct i40e_asq_cmd_details * cmd_details)2092 enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
2093 u16 seid, bool enable,
2094 u16 vid,
2095 struct i40e_asq_cmd_details *cmd_details)
2096 {
2097 struct i40e_aq_desc desc;
2098 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2099 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2100 enum i40e_status_code status;
2101 u16 flags = 0;
2102
2103 i40e_fill_default_direct_cmd_desc(&desc,
2104 i40e_aqc_opc_set_vsi_promiscuous_modes);
2105
2106 if (enable) {
2107 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2108 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2109 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
2110 }
2111
2112 cmd->promiscuous_flags = cpu_to_le16(flags);
2113 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2114 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2115 cmd->valid_flags |=
2116 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
2117 cmd->seid = cpu_to_le16(seid);
2118 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2119
2120 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
2121 cmd_details, true);
2122
2123 return status;
2124 }
2125
2126 /**
2127 * i40e_aq_set_vsi_bc_promisc_on_vlan
2128 * @hw: pointer to the hw struct
2129 * @seid: vsi number
2130 * @enable: set broadcast promiscuous enable/disable for a given VLAN
2131 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
2132 * @cmd_details: pointer to command details structure or NULL
2133 **/
i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw * hw,u16 seid,bool enable,u16 vid,struct i40e_asq_cmd_details * cmd_details)2134 i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
2135 u16 seid, bool enable, u16 vid,
2136 struct i40e_asq_cmd_details *cmd_details)
2137 {
2138 struct i40e_aq_desc desc;
2139 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2140 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2141 i40e_status status;
2142 u16 flags = 0;
2143
2144 i40e_fill_default_direct_cmd_desc(&desc,
2145 i40e_aqc_opc_set_vsi_promiscuous_modes);
2146
2147 if (enable)
2148 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2149
2150 cmd->promiscuous_flags = cpu_to_le16(flags);
2151 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2152 cmd->seid = cpu_to_le16(seid);
2153 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2154
2155 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2156
2157 return status;
2158 }
2159
2160 /**
2161 * i40e_aq_set_vsi_broadcast
2162 * @hw: pointer to the hw struct
2163 * @seid: vsi number
2164 * @set_filter: true to set filter, false to clear filter
2165 * @cmd_details: pointer to command details structure or NULL
2166 *
2167 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2168 **/
i40e_aq_set_vsi_broadcast(struct i40e_hw * hw,u16 seid,bool set_filter,struct i40e_asq_cmd_details * cmd_details)2169 i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2170 u16 seid, bool set_filter,
2171 struct i40e_asq_cmd_details *cmd_details)
2172 {
2173 struct i40e_aq_desc desc;
2174 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2175 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2176 i40e_status status;
2177
2178 i40e_fill_default_direct_cmd_desc(&desc,
2179 i40e_aqc_opc_set_vsi_promiscuous_modes);
2180
2181 if (set_filter)
2182 cmd->promiscuous_flags
2183 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2184 else
2185 cmd->promiscuous_flags
2186 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2187
2188 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2189 cmd->seid = cpu_to_le16(seid);
2190 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2191
2192 return status;
2193 }
2194
2195 /**
2196 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2197 * @hw: pointer to the hw struct
2198 * @seid: vsi number
2199 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2200 * @cmd_details: pointer to command details structure or NULL
2201 **/
i40e_aq_set_vsi_vlan_promisc(struct i40e_hw * hw,u16 seid,bool enable,struct i40e_asq_cmd_details * cmd_details)2202 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2203 u16 seid, bool enable,
2204 struct i40e_asq_cmd_details *cmd_details)
2205 {
2206 struct i40e_aq_desc desc;
2207 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2208 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2209 i40e_status status;
2210 u16 flags = 0;
2211
2212 i40e_fill_default_direct_cmd_desc(&desc,
2213 i40e_aqc_opc_set_vsi_promiscuous_modes);
2214 if (enable)
2215 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2216
2217 cmd->promiscuous_flags = cpu_to_le16(flags);
2218 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2219 cmd->seid = cpu_to_le16(seid);
2220
2221 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2222
2223 return status;
2224 }
2225
2226 /**
2227 * i40e_aq_get_vsi_params - get VSI configuration info
2228 * @hw: pointer to the hw struct
2229 * @vsi_ctx: pointer to a vsi context struct
2230 * @cmd_details: pointer to command details structure or NULL
2231 **/
i40e_aq_get_vsi_params(struct i40e_hw * hw,struct i40e_vsi_context * vsi_ctx,struct i40e_asq_cmd_details * cmd_details)2232 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
2233 struct i40e_vsi_context *vsi_ctx,
2234 struct i40e_asq_cmd_details *cmd_details)
2235 {
2236 struct i40e_aq_desc desc;
2237 struct i40e_aqc_add_get_update_vsi *cmd =
2238 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2239 struct i40e_aqc_add_get_update_vsi_completion *resp =
2240 (struct i40e_aqc_add_get_update_vsi_completion *)
2241 &desc.params.raw;
2242 i40e_status status;
2243
2244 i40e_fill_default_direct_cmd_desc(&desc,
2245 i40e_aqc_opc_get_vsi_parameters);
2246
2247 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2248
2249 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2250
2251 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2252 sizeof(vsi_ctx->info), NULL);
2253
2254 if (status)
2255 goto aq_get_vsi_params_exit;
2256
2257 vsi_ctx->seid = le16_to_cpu(resp->seid);
2258 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2259 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2260 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2261
2262 aq_get_vsi_params_exit:
2263 return status;
2264 }
2265
2266 /**
2267 * i40e_aq_update_vsi_params
2268 * @hw: pointer to the hw struct
2269 * @vsi_ctx: pointer to a vsi context struct
2270 * @cmd_details: pointer to command details structure or NULL
2271 *
2272 * Update a VSI context.
2273 **/
i40e_aq_update_vsi_params(struct i40e_hw * hw,struct i40e_vsi_context * vsi_ctx,struct i40e_asq_cmd_details * cmd_details)2274 i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
2275 struct i40e_vsi_context *vsi_ctx,
2276 struct i40e_asq_cmd_details *cmd_details)
2277 {
2278 struct i40e_aq_desc desc;
2279 struct i40e_aqc_add_get_update_vsi *cmd =
2280 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2281 struct i40e_aqc_add_get_update_vsi_completion *resp =
2282 (struct i40e_aqc_add_get_update_vsi_completion *)
2283 &desc.params.raw;
2284 i40e_status status;
2285
2286 i40e_fill_default_direct_cmd_desc(&desc,
2287 i40e_aqc_opc_update_vsi_parameters);
2288 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2289
2290 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2291
2292 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
2293 sizeof(vsi_ctx->info),
2294 cmd_details, true);
2295
2296 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2297 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2298
2299 return status;
2300 }
2301
2302 /**
2303 * i40e_aq_get_switch_config
2304 * @hw: pointer to the hardware structure
2305 * @buf: pointer to the result buffer
2306 * @buf_size: length of input buffer
2307 * @start_seid: seid to start for the report, 0 == beginning
2308 * @cmd_details: pointer to command details structure or NULL
2309 *
2310 * Fill the buf with switch configuration returned from AdminQ command
2311 **/
i40e_aq_get_switch_config(struct i40e_hw * hw,struct i40e_aqc_get_switch_config_resp * buf,u16 buf_size,u16 * start_seid,struct i40e_asq_cmd_details * cmd_details)2312 i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
2313 struct i40e_aqc_get_switch_config_resp *buf,
2314 u16 buf_size, u16 *start_seid,
2315 struct i40e_asq_cmd_details *cmd_details)
2316 {
2317 struct i40e_aq_desc desc;
2318 struct i40e_aqc_switch_seid *scfg =
2319 (struct i40e_aqc_switch_seid *)&desc.params.raw;
2320 i40e_status status;
2321
2322 i40e_fill_default_direct_cmd_desc(&desc,
2323 i40e_aqc_opc_get_switch_config);
2324 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2325 if (buf_size > I40E_AQ_LARGE_BUF)
2326 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2327 scfg->seid = cpu_to_le16(*start_seid);
2328
2329 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2330 *start_seid = le16_to_cpu(scfg->seid);
2331
2332 return status;
2333 }
2334
2335 /**
2336 * i40e_aq_set_switch_config
2337 * @hw: pointer to the hardware structure
2338 * @flags: bit flag values to set
2339 * @mode: cloud filter mode
2340 * @valid_flags: which bit flags to set
2341 * @mode: cloud filter mode
2342 * @cmd_details: pointer to command details structure or NULL
2343 *
2344 * Set switch configuration bits
2345 **/
i40e_aq_set_switch_config(struct i40e_hw * hw,u16 flags,u16 valid_flags,u8 mode,struct i40e_asq_cmd_details * cmd_details)2346 enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
2347 u16 flags,
2348 u16 valid_flags, u8 mode,
2349 struct i40e_asq_cmd_details *cmd_details)
2350 {
2351 struct i40e_aq_desc desc;
2352 struct i40e_aqc_set_switch_config *scfg =
2353 (struct i40e_aqc_set_switch_config *)&desc.params.raw;
2354 enum i40e_status_code status;
2355
2356 i40e_fill_default_direct_cmd_desc(&desc,
2357 i40e_aqc_opc_set_switch_config);
2358 scfg->flags = cpu_to_le16(flags);
2359 scfg->valid_flags = cpu_to_le16(valid_flags);
2360 scfg->mode = mode;
2361 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2362 scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2363 scfg->first_tag = cpu_to_le16(hw->first_tag);
2364 scfg->second_tag = cpu_to_le16(hw->second_tag);
2365 }
2366 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2367
2368 return status;
2369 }
2370
2371 /**
2372 * i40e_aq_get_firmware_version
2373 * @hw: pointer to the hw struct
2374 * @fw_major_version: firmware major version
2375 * @fw_minor_version: firmware minor version
2376 * @fw_build: firmware build number
2377 * @api_major_version: major queue version
2378 * @api_minor_version: minor queue version
2379 * @cmd_details: pointer to command details structure or NULL
2380 *
2381 * Get the firmware version from the admin queue commands
2382 **/
i40e_aq_get_firmware_version(struct i40e_hw * hw,u16 * fw_major_version,u16 * fw_minor_version,u32 * fw_build,u16 * api_major_version,u16 * api_minor_version,struct i40e_asq_cmd_details * cmd_details)2383 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
2384 u16 *fw_major_version, u16 *fw_minor_version,
2385 u32 *fw_build,
2386 u16 *api_major_version, u16 *api_minor_version,
2387 struct i40e_asq_cmd_details *cmd_details)
2388 {
2389 struct i40e_aq_desc desc;
2390 struct i40e_aqc_get_version *resp =
2391 (struct i40e_aqc_get_version *)&desc.params.raw;
2392 i40e_status status;
2393
2394 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2395
2396 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2397
2398 if (!status) {
2399 if (fw_major_version)
2400 *fw_major_version = le16_to_cpu(resp->fw_major);
2401 if (fw_minor_version)
2402 *fw_minor_version = le16_to_cpu(resp->fw_minor);
2403 if (fw_build)
2404 *fw_build = le32_to_cpu(resp->fw_build);
2405 if (api_major_version)
2406 *api_major_version = le16_to_cpu(resp->api_major);
2407 if (api_minor_version)
2408 *api_minor_version = le16_to_cpu(resp->api_minor);
2409 }
2410
2411 return status;
2412 }
2413
2414 /**
2415 * i40e_aq_send_driver_version
2416 * @hw: pointer to the hw struct
2417 * @dv: driver's major, minor version
2418 * @cmd_details: pointer to command details structure or NULL
2419 *
2420 * Send the driver version to the firmware
2421 **/
i40e_aq_send_driver_version(struct i40e_hw * hw,struct i40e_driver_version * dv,struct i40e_asq_cmd_details * cmd_details)2422 i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
2423 struct i40e_driver_version *dv,
2424 struct i40e_asq_cmd_details *cmd_details)
2425 {
2426 struct i40e_aq_desc desc;
2427 struct i40e_aqc_driver_version *cmd =
2428 (struct i40e_aqc_driver_version *)&desc.params.raw;
2429 i40e_status status;
2430 u16 len;
2431
2432 if (dv == NULL)
2433 return I40E_ERR_PARAM;
2434
2435 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2436
2437 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2438 cmd->driver_major_ver = dv->major_version;
2439 cmd->driver_minor_ver = dv->minor_version;
2440 cmd->driver_build_ver = dv->build_version;
2441 cmd->driver_subbuild_ver = dv->subbuild_version;
2442
2443 len = 0;
2444 while (len < sizeof(dv->driver_string) &&
2445 (dv->driver_string[len] < 0x80) &&
2446 dv->driver_string[len])
2447 len++;
2448 status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2449 len, cmd_details);
2450
2451 return status;
2452 }
2453
2454 /**
2455 * i40e_get_link_status - get status of the HW network link
2456 * @hw: pointer to the hw struct
2457 * @link_up: pointer to bool (true/false = linkup/linkdown)
2458 *
2459 * Variable link_up true if link is up, false if link is down.
2460 * The variable link_up is invalid if returned value of status != 0
2461 *
2462 * Side effect: LinkStatusEvent reporting becomes enabled
2463 **/
i40e_get_link_status(struct i40e_hw * hw,bool * link_up)2464 i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2465 {
2466 i40e_status status = 0;
2467
2468 if (hw->phy.get_link_info) {
2469 status = i40e_update_link_info(hw);
2470
2471 if (status)
2472 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2473 status);
2474 }
2475
2476 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2477
2478 return status;
2479 }
2480
2481 /**
2482 * i40e_update_link_info - update status of the HW network link
2483 * @hw: pointer to the hw struct
2484 **/
i40e_update_link_info(struct i40e_hw * hw)2485 noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
2486 {
2487 struct i40e_aq_get_phy_abilities_resp abilities;
2488 i40e_status status = 0;
2489
2490 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2491 if (status)
2492 return status;
2493
2494 /* extra checking needed to ensure link info to user is timely */
2495 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2496 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2497 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2498 status = i40e_aq_get_phy_capabilities(hw, false, false,
2499 &abilities, NULL);
2500 if (status)
2501 return status;
2502
2503 if (abilities.fec_cfg_curr_mod_ext_info &
2504 I40E_AQ_ENABLE_FEC_AUTO)
2505 hw->phy.link_info.req_fec_info =
2506 (I40E_AQ_REQUEST_FEC_KR |
2507 I40E_AQ_REQUEST_FEC_RS);
2508 else
2509 hw->phy.link_info.req_fec_info =
2510 abilities.fec_cfg_curr_mod_ext_info &
2511 (I40E_AQ_REQUEST_FEC_KR |
2512 I40E_AQ_REQUEST_FEC_RS);
2513
2514 memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2515 sizeof(hw->phy.link_info.module_type));
2516 }
2517
2518 return status;
2519 }
2520
2521 /**
2522 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2523 * @hw: pointer to the hw struct
2524 * @uplink_seid: the MAC or other gizmo SEID
2525 * @downlink_seid: the VSI SEID
2526 * @enabled_tc: bitmap of TCs to be enabled
2527 * @default_port: true for default port VSI, false for control port
2528 * @veb_seid: pointer to where to put the resulting VEB SEID
2529 * @enable_stats: true to turn on VEB stats
2530 * @cmd_details: pointer to command details structure or NULL
2531 *
2532 * This asks the FW to add a VEB between the uplink and downlink
2533 * elements. If the uplink SEID is 0, this will be a floating VEB.
2534 **/
i40e_aq_add_veb(struct i40e_hw * hw,u16 uplink_seid,u16 downlink_seid,u8 enabled_tc,bool default_port,u16 * veb_seid,bool enable_stats,struct i40e_asq_cmd_details * cmd_details)2535 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2536 u16 downlink_seid, u8 enabled_tc,
2537 bool default_port, u16 *veb_seid,
2538 bool enable_stats,
2539 struct i40e_asq_cmd_details *cmd_details)
2540 {
2541 struct i40e_aq_desc desc;
2542 struct i40e_aqc_add_veb *cmd =
2543 (struct i40e_aqc_add_veb *)&desc.params.raw;
2544 struct i40e_aqc_add_veb_completion *resp =
2545 (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2546 i40e_status status;
2547 u16 veb_flags = 0;
2548
2549 /* SEIDs need to either both be set or both be 0 for floating VEB */
2550 if (!!uplink_seid != !!downlink_seid)
2551 return I40E_ERR_PARAM;
2552
2553 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2554
2555 cmd->uplink_seid = cpu_to_le16(uplink_seid);
2556 cmd->downlink_seid = cpu_to_le16(downlink_seid);
2557 cmd->enable_tcs = enabled_tc;
2558 if (!uplink_seid)
2559 veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2560 if (default_port)
2561 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2562 else
2563 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2564
2565 /* reverse logic here: set the bitflag to disable the stats */
2566 if (!enable_stats)
2567 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2568
2569 cmd->veb_flags = cpu_to_le16(veb_flags);
2570
2571 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2572
2573 if (!status && veb_seid)
2574 *veb_seid = le16_to_cpu(resp->veb_seid);
2575
2576 return status;
2577 }
2578
2579 /**
2580 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2581 * @hw: pointer to the hw struct
2582 * @veb_seid: the SEID of the VEB to query
2583 * @switch_id: the uplink switch id
2584 * @floating: set to true if the VEB is floating
2585 * @statistic_index: index of the stats counter block for this VEB
2586 * @vebs_used: number of VEB's used by function
2587 * @vebs_free: total VEB's not reserved by any function
2588 * @cmd_details: pointer to command details structure or NULL
2589 *
2590 * This retrieves the parameters for a particular VEB, specified by
2591 * uplink_seid, and returns them to the caller.
2592 **/
i40e_aq_get_veb_parameters(struct i40e_hw * hw,u16 veb_seid,u16 * switch_id,bool * floating,u16 * statistic_index,u16 * vebs_used,u16 * vebs_free,struct i40e_asq_cmd_details * cmd_details)2593 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2594 u16 veb_seid, u16 *switch_id,
2595 bool *floating, u16 *statistic_index,
2596 u16 *vebs_used, u16 *vebs_free,
2597 struct i40e_asq_cmd_details *cmd_details)
2598 {
2599 struct i40e_aq_desc desc;
2600 struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2601 (struct i40e_aqc_get_veb_parameters_completion *)
2602 &desc.params.raw;
2603 i40e_status status;
2604
2605 if (veb_seid == 0)
2606 return I40E_ERR_PARAM;
2607
2608 i40e_fill_default_direct_cmd_desc(&desc,
2609 i40e_aqc_opc_get_veb_parameters);
2610 cmd_resp->seid = cpu_to_le16(veb_seid);
2611
2612 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2613 if (status)
2614 goto get_veb_exit;
2615
2616 if (switch_id)
2617 *switch_id = le16_to_cpu(cmd_resp->switch_id);
2618 if (statistic_index)
2619 *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2620 if (vebs_used)
2621 *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2622 if (vebs_free)
2623 *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2624 if (floating) {
2625 u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2626
2627 if (flags & I40E_AQC_ADD_VEB_FLOATING)
2628 *floating = true;
2629 else
2630 *floating = false;
2631 }
2632
2633 get_veb_exit:
2634 return status;
2635 }
2636
2637 /**
2638 * i40e_prepare_add_macvlan
2639 * @mv_list: list of macvlans to be added
2640 * @desc: pointer to AQ descriptor structure
2641 * @count: length of the list
2642 * @seid: VSI for the mac address
2643 *
2644 * Internal helper function that prepares the add macvlan request
2645 * and returns the buffer size.
2646 **/
2647 static u16
i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data * mv_list,struct i40e_aq_desc * desc,u16 count,u16 seid)2648 i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
2649 struct i40e_aq_desc *desc, u16 count, u16 seid)
2650 {
2651 struct i40e_aqc_macvlan *cmd =
2652 (struct i40e_aqc_macvlan *)&desc->params.raw;
2653 u16 buf_size;
2654 int i;
2655
2656 buf_size = count * sizeof(*mv_list);
2657
2658 /* prep the rest of the request */
2659 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
2660 cmd->num_addresses = cpu_to_le16(count);
2661 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2662 cmd->seid[1] = 0;
2663 cmd->seid[2] = 0;
2664
2665 for (i = 0; i < count; i++)
2666 if (is_multicast_ether_addr(mv_list[i].mac_addr))
2667 mv_list[i].flags |=
2668 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2669
2670 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2671 if (buf_size > I40E_AQ_LARGE_BUF)
2672 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2673
2674 return buf_size;
2675 }
2676
2677 /**
2678 * i40e_aq_add_macvlan
2679 * @hw: pointer to the hw struct
2680 * @seid: VSI for the mac address
2681 * @mv_list: list of macvlans to be added
2682 * @count: length of the list
2683 * @cmd_details: pointer to command details structure or NULL
2684 *
2685 * Add MAC/VLAN addresses to the HW filtering
2686 **/
2687 i40e_status
i40e_aq_add_macvlan(struct i40e_hw * hw,u16 seid,struct i40e_aqc_add_macvlan_element_data * mv_list,u16 count,struct i40e_asq_cmd_details * cmd_details)2688 i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2689 struct i40e_aqc_add_macvlan_element_data *mv_list,
2690 u16 count, struct i40e_asq_cmd_details *cmd_details)
2691 {
2692 struct i40e_aq_desc desc;
2693 u16 buf_size;
2694
2695 if (count == 0 || !mv_list || !hw)
2696 return I40E_ERR_PARAM;
2697
2698 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2699
2700 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2701 cmd_details, true);
2702 }
2703
2704 /**
2705 * i40e_aq_add_macvlan_v2
2706 * @hw: pointer to the hw struct
2707 * @seid: VSI for the mac address
2708 * @mv_list: list of macvlans to be added
2709 * @count: length of the list
2710 * @cmd_details: pointer to command details structure or NULL
2711 * @aq_status: pointer to Admin Queue status return value
2712 *
2713 * Add MAC/VLAN addresses to the HW filtering.
2714 * The _v2 version returns the last Admin Queue status in aq_status
2715 * to avoid race conditions in access to hw->aq.asq_last_status.
2716 * It also calls _v2 versions of asq_send_command functions to
2717 * get the aq_status on the stack.
2718 **/
2719 i40e_status
i40e_aq_add_macvlan_v2(struct i40e_hw * hw,u16 seid,struct i40e_aqc_add_macvlan_element_data * mv_list,u16 count,struct i40e_asq_cmd_details * cmd_details,enum i40e_admin_queue_err * aq_status)2720 i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
2721 struct i40e_aqc_add_macvlan_element_data *mv_list,
2722 u16 count, struct i40e_asq_cmd_details *cmd_details,
2723 enum i40e_admin_queue_err *aq_status)
2724 {
2725 struct i40e_aq_desc desc;
2726 u16 buf_size;
2727
2728 if (count == 0 || !mv_list || !hw)
2729 return I40E_ERR_PARAM;
2730
2731 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2732
2733 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2734 cmd_details, true, aq_status);
2735 }
2736
2737 /**
2738 * i40e_aq_remove_macvlan
2739 * @hw: pointer to the hw struct
2740 * @seid: VSI for the mac address
2741 * @mv_list: list of macvlans to be removed
2742 * @count: length of the list
2743 * @cmd_details: pointer to command details structure or NULL
2744 *
2745 * Remove MAC/VLAN addresses from the HW filtering
2746 **/
i40e_aq_remove_macvlan(struct i40e_hw * hw,u16 seid,struct i40e_aqc_remove_macvlan_element_data * mv_list,u16 count,struct i40e_asq_cmd_details * cmd_details)2747 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2748 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2749 u16 count, struct i40e_asq_cmd_details *cmd_details)
2750 {
2751 struct i40e_aq_desc desc;
2752 struct i40e_aqc_macvlan *cmd =
2753 (struct i40e_aqc_macvlan *)&desc.params.raw;
2754 i40e_status status;
2755 u16 buf_size;
2756
2757 if (count == 0 || !mv_list || !hw)
2758 return I40E_ERR_PARAM;
2759
2760 buf_size = count * sizeof(*mv_list);
2761
2762 /* prep the rest of the request */
2763 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2764 cmd->num_addresses = cpu_to_le16(count);
2765 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2766 cmd->seid[1] = 0;
2767 cmd->seid[2] = 0;
2768
2769 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2770 if (buf_size > I40E_AQ_LARGE_BUF)
2771 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2772
2773 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2774 cmd_details, true);
2775
2776 return status;
2777 }
2778
2779 /**
2780 * i40e_aq_remove_macvlan_v2
2781 * @hw: pointer to the hw struct
2782 * @seid: VSI for the mac address
2783 * @mv_list: list of macvlans to be removed
2784 * @count: length of the list
2785 * @cmd_details: pointer to command details structure or NULL
2786 * @aq_status: pointer to Admin Queue status return value
2787 *
2788 * Remove MAC/VLAN addresses from the HW filtering.
2789 * The _v2 version returns the last Admin Queue status in aq_status
2790 * to avoid race conditions in access to hw->aq.asq_last_status.
2791 * It also calls _v2 versions of asq_send_command functions to
2792 * get the aq_status on the stack.
2793 **/
2794 i40e_status
i40e_aq_remove_macvlan_v2(struct i40e_hw * hw,u16 seid,struct i40e_aqc_remove_macvlan_element_data * mv_list,u16 count,struct i40e_asq_cmd_details * cmd_details,enum i40e_admin_queue_err * aq_status)2795 i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
2796 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2797 u16 count, struct i40e_asq_cmd_details *cmd_details,
2798 enum i40e_admin_queue_err *aq_status)
2799 {
2800 struct i40e_aqc_macvlan *cmd;
2801 struct i40e_aq_desc desc;
2802 u16 buf_size;
2803
2804 if (count == 0 || !mv_list || !hw)
2805 return I40E_ERR_PARAM;
2806
2807 buf_size = count * sizeof(*mv_list);
2808
2809 /* prep the rest of the request */
2810 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2811 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
2812 cmd->num_addresses = cpu_to_le16(count);
2813 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2814 cmd->seid[1] = 0;
2815 cmd->seid[2] = 0;
2816
2817 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2818 if (buf_size > I40E_AQ_LARGE_BUF)
2819 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2820
2821 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2822 cmd_details, true, aq_status);
2823 }
2824
2825 /**
2826 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2827 * @hw: pointer to the hw struct
2828 * @opcode: AQ opcode for add or delete mirror rule
2829 * @sw_seid: Switch SEID (to which rule refers)
2830 * @rule_type: Rule Type (ingress/egress/VLAN)
2831 * @id: Destination VSI SEID or Rule ID
2832 * @count: length of the list
2833 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2834 * @cmd_details: pointer to command details structure or NULL
2835 * @rule_id: Rule ID returned from FW
2836 * @rules_used: Number of rules used in internal switch
2837 * @rules_free: Number of rules free in internal switch
2838 *
2839 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2840 * VEBs/VEPA elements only
2841 **/
i40e_mirrorrule_op(struct i40e_hw * hw,u16 opcode,u16 sw_seid,u16 rule_type,u16 id,u16 count,__le16 * mr_list,struct i40e_asq_cmd_details * cmd_details,u16 * rule_id,u16 * rules_used,u16 * rules_free)2842 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
2843 u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2844 u16 count, __le16 *mr_list,
2845 struct i40e_asq_cmd_details *cmd_details,
2846 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2847 {
2848 struct i40e_aq_desc desc;
2849 struct i40e_aqc_add_delete_mirror_rule *cmd =
2850 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2851 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2852 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2853 i40e_status status;
2854 u16 buf_size;
2855
2856 buf_size = count * sizeof(*mr_list);
2857
2858 /* prep the rest of the request */
2859 i40e_fill_default_direct_cmd_desc(&desc, opcode);
2860 cmd->seid = cpu_to_le16(sw_seid);
2861 cmd->rule_type = cpu_to_le16(rule_type &
2862 I40E_AQC_MIRROR_RULE_TYPE_MASK);
2863 cmd->num_entries = cpu_to_le16(count);
2864 /* Dest VSI for add, rule_id for delete */
2865 cmd->destination = cpu_to_le16(id);
2866 if (mr_list) {
2867 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2868 I40E_AQ_FLAG_RD));
2869 if (buf_size > I40E_AQ_LARGE_BUF)
2870 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2871 }
2872
2873 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2874 cmd_details);
2875 if (!status ||
2876 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2877 if (rule_id)
2878 *rule_id = le16_to_cpu(resp->rule_id);
2879 if (rules_used)
2880 *rules_used = le16_to_cpu(resp->mirror_rules_used);
2881 if (rules_free)
2882 *rules_free = le16_to_cpu(resp->mirror_rules_free);
2883 }
2884 return status;
2885 }
2886
2887 /**
2888 * i40e_aq_add_mirrorrule - add a mirror rule
2889 * @hw: pointer to the hw struct
2890 * @sw_seid: Switch SEID (to which rule refers)
2891 * @rule_type: Rule Type (ingress/egress/VLAN)
2892 * @dest_vsi: SEID of VSI to which packets will be mirrored
2893 * @count: length of the list
2894 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2895 * @cmd_details: pointer to command details structure or NULL
2896 * @rule_id: Rule ID returned from FW
2897 * @rules_used: Number of rules used in internal switch
2898 * @rules_free: Number of rules free in internal switch
2899 *
2900 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2901 **/
i40e_aq_add_mirrorrule(struct i40e_hw * hw,u16 sw_seid,u16 rule_type,u16 dest_vsi,u16 count,__le16 * mr_list,struct i40e_asq_cmd_details * cmd_details,u16 * rule_id,u16 * rules_used,u16 * rules_free)2902 i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2903 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
2904 struct i40e_asq_cmd_details *cmd_details,
2905 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2906 {
2907 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2908 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2909 if (count == 0 || !mr_list)
2910 return I40E_ERR_PARAM;
2911 }
2912
2913 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2914 rule_type, dest_vsi, count, mr_list,
2915 cmd_details, rule_id, rules_used, rules_free);
2916 }
2917
2918 /**
2919 * i40e_aq_delete_mirrorrule - delete a mirror rule
2920 * @hw: pointer to the hw struct
2921 * @sw_seid: Switch SEID (to which rule refers)
2922 * @rule_type: Rule Type (ingress/egress/VLAN)
2923 * @count: length of the list
2924 * @rule_id: Rule ID that is returned in the receive desc as part of
2925 * add_mirrorrule.
2926 * @mr_list: list of mirrored VLAN IDs to be removed
2927 * @cmd_details: pointer to command details structure or NULL
2928 * @rules_used: Number of rules used in internal switch
2929 * @rules_free: Number of rules free in internal switch
2930 *
2931 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2932 **/
i40e_aq_delete_mirrorrule(struct i40e_hw * hw,u16 sw_seid,u16 rule_type,u16 rule_id,u16 count,__le16 * mr_list,struct i40e_asq_cmd_details * cmd_details,u16 * rules_used,u16 * rules_free)2933 i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2934 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
2935 struct i40e_asq_cmd_details *cmd_details,
2936 u16 *rules_used, u16 *rules_free)
2937 {
2938 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
2939 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
2940 /* count and mr_list shall be valid for rule_type INGRESS VLAN
2941 * mirroring. For other rule_type, count and rule_type should
2942 * not matter.
2943 */
2944 if (count == 0 || !mr_list)
2945 return I40E_ERR_PARAM;
2946 }
2947
2948 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2949 rule_type, rule_id, count, mr_list,
2950 cmd_details, NULL, rules_used, rules_free);
2951 }
2952
2953 /**
2954 * i40e_aq_send_msg_to_vf
2955 * @hw: pointer to the hardware structure
2956 * @vfid: VF id to send msg
2957 * @v_opcode: opcodes for VF-PF communication
2958 * @v_retval: return error code
2959 * @msg: pointer to the msg buffer
2960 * @msglen: msg length
2961 * @cmd_details: pointer to command details
2962 *
2963 * send msg to vf
2964 **/
i40e_aq_send_msg_to_vf(struct i40e_hw * hw,u16 vfid,u32 v_opcode,u32 v_retval,u8 * msg,u16 msglen,struct i40e_asq_cmd_details * cmd_details)2965 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2966 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2967 struct i40e_asq_cmd_details *cmd_details)
2968 {
2969 struct i40e_aq_desc desc;
2970 struct i40e_aqc_pf_vf_message *cmd =
2971 (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2972 i40e_status status;
2973
2974 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
2975 cmd->id = cpu_to_le32(vfid);
2976 desc.cookie_high = cpu_to_le32(v_opcode);
2977 desc.cookie_low = cpu_to_le32(v_retval);
2978 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
2979 if (msglen) {
2980 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2981 I40E_AQ_FLAG_RD));
2982 if (msglen > I40E_AQ_LARGE_BUF)
2983 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2984 desc.datalen = cpu_to_le16(msglen);
2985 }
2986 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
2987
2988 return status;
2989 }
2990
2991 /**
2992 * i40e_aq_debug_read_register
2993 * @hw: pointer to the hw struct
2994 * @reg_addr: register address
2995 * @reg_val: register value
2996 * @cmd_details: pointer to command details structure or NULL
2997 *
2998 * Read the register using the admin queue commands
2999 **/
i40e_aq_debug_read_register(struct i40e_hw * hw,u32 reg_addr,u64 * reg_val,struct i40e_asq_cmd_details * cmd_details)3000 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
3001 u32 reg_addr, u64 *reg_val,
3002 struct i40e_asq_cmd_details *cmd_details)
3003 {
3004 struct i40e_aq_desc desc;
3005 struct i40e_aqc_debug_reg_read_write *cmd_resp =
3006 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
3007 i40e_status status;
3008
3009 if (reg_val == NULL)
3010 return I40E_ERR_PARAM;
3011
3012 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
3013
3014 cmd_resp->address = cpu_to_le32(reg_addr);
3015
3016 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3017
3018 if (!status) {
3019 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
3020 (u64)le32_to_cpu(cmd_resp->value_low);
3021 }
3022
3023 return status;
3024 }
3025
3026 /**
3027 * i40e_aq_debug_write_register
3028 * @hw: pointer to the hw struct
3029 * @reg_addr: register address
3030 * @reg_val: register value
3031 * @cmd_details: pointer to command details structure or NULL
3032 *
3033 * Write to a register using the admin queue commands
3034 **/
i40e_aq_debug_write_register(struct i40e_hw * hw,u32 reg_addr,u64 reg_val,struct i40e_asq_cmd_details * cmd_details)3035 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
3036 u32 reg_addr, u64 reg_val,
3037 struct i40e_asq_cmd_details *cmd_details)
3038 {
3039 struct i40e_aq_desc desc;
3040 struct i40e_aqc_debug_reg_read_write *cmd =
3041 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
3042 i40e_status status;
3043
3044 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
3045
3046 cmd->address = cpu_to_le32(reg_addr);
3047 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
3048 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
3049
3050 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3051
3052 return status;
3053 }
3054
3055 /**
3056 * i40e_aq_request_resource
3057 * @hw: pointer to the hw struct
3058 * @resource: resource id
3059 * @access: access type
3060 * @sdp_number: resource number
3061 * @timeout: the maximum time in ms that the driver may hold the resource
3062 * @cmd_details: pointer to command details structure or NULL
3063 *
3064 * requests common resource using the admin queue commands
3065 **/
i40e_aq_request_resource(struct i40e_hw * hw,enum i40e_aq_resources_ids resource,enum i40e_aq_resource_access_type access,u8 sdp_number,u64 * timeout,struct i40e_asq_cmd_details * cmd_details)3066 i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
3067 enum i40e_aq_resources_ids resource,
3068 enum i40e_aq_resource_access_type access,
3069 u8 sdp_number, u64 *timeout,
3070 struct i40e_asq_cmd_details *cmd_details)
3071 {
3072 struct i40e_aq_desc desc;
3073 struct i40e_aqc_request_resource *cmd_resp =
3074 (struct i40e_aqc_request_resource *)&desc.params.raw;
3075 i40e_status status;
3076
3077 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
3078
3079 cmd_resp->resource_id = cpu_to_le16(resource);
3080 cmd_resp->access_type = cpu_to_le16(access);
3081 cmd_resp->resource_number = cpu_to_le32(sdp_number);
3082
3083 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3084 /* The completion specifies the maximum time in ms that the driver
3085 * may hold the resource in the Timeout field.
3086 * If the resource is held by someone else, the command completes with
3087 * busy return value and the timeout field indicates the maximum time
3088 * the current owner of the resource has to free it.
3089 */
3090 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
3091 *timeout = le32_to_cpu(cmd_resp->timeout);
3092
3093 return status;
3094 }
3095
3096 /**
3097 * i40e_aq_release_resource
3098 * @hw: pointer to the hw struct
3099 * @resource: resource id
3100 * @sdp_number: resource number
3101 * @cmd_details: pointer to command details structure or NULL
3102 *
3103 * release common resource using the admin queue commands
3104 **/
i40e_aq_release_resource(struct i40e_hw * hw,enum i40e_aq_resources_ids resource,u8 sdp_number,struct i40e_asq_cmd_details * cmd_details)3105 i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
3106 enum i40e_aq_resources_ids resource,
3107 u8 sdp_number,
3108 struct i40e_asq_cmd_details *cmd_details)
3109 {
3110 struct i40e_aq_desc desc;
3111 struct i40e_aqc_request_resource *cmd =
3112 (struct i40e_aqc_request_resource *)&desc.params.raw;
3113 i40e_status status;
3114
3115 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
3116
3117 cmd->resource_id = cpu_to_le16(resource);
3118 cmd->resource_number = cpu_to_le32(sdp_number);
3119
3120 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3121
3122 return status;
3123 }
3124
3125 /**
3126 * i40e_aq_read_nvm
3127 * @hw: pointer to the hw struct
3128 * @module_pointer: module pointer location in words from the NVM beginning
3129 * @offset: byte offset from the module beginning
3130 * @length: length of the section to be read (in bytes from the offset)
3131 * @data: command buffer (size [bytes] = length)
3132 * @last_command: tells if this is the last command in a series
3133 * @cmd_details: pointer to command details structure or NULL
3134 *
3135 * Read the NVM using the admin queue commands
3136 **/
3137 i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
3138 u32 offset, u16 length, void *data,
3139 bool last_command,
3140 struct i40e_asq_cmd_details *cmd_details)
3141 {
3142 struct i40e_aq_desc desc;
3143 struct i40e_aqc_nvm_update *cmd =
3144 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3145 i40e_status status;
3146
3147 /* The highest byte of the offset must be zero. */
3148 if (offset & 0xFF000000) {
3149 status = I40E_ERR_PARAM;
3150 goto i40e_aq_read_nvm_exit;
3151 }
3152
3153 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3154
3155 /* If this is the last command in a series, set the proper flag. */
3156 if (last_command)
3157 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3158 cmd->module_pointer = module_pointer;
3159 cmd->offset = cpu_to_le32(offset);
3160 cmd->length = cpu_to_le16(length);
3161
3162 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3163 if (length > I40E_AQ_LARGE_BUF)
3164 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3165
3166 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3167
3168 i40e_aq_read_nvm_exit:
3169 return status;
3170 }
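
/*
 * Illustrative sketch (not part of the driver): reading a single shadow-RAM
 * word through the AQ while holding the NVM resource.  Offset and length are
 * in bytes, the highest offset byte must stay zero, and the firmware returns
 * the word in little-endian order.  The wrapper name and the flat read from
 * module pointer 0 are assumptions for this example; the driver's real NVM
 * read helpers live in i40e_nvm.c.
 */
static i40e_status i40e_example_read_sr_word_aq(struct i40e_hw *hw,
                                                u16 word_offset, __le16 *data)
{
        i40e_status status;

        status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
        if (status)
                return status;

        status = i40e_aq_read_nvm(hw, 0, 2 * word_offset, sizeof(*data),
                                  data, true, NULL);
        i40e_release_nvm(hw);

        return status;
}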
3171
3172 /**
3173 * i40e_aq_erase_nvm
3174 * @hw: pointer to the hw struct
3175 * @module_pointer: module pointer location in words from the NVM beginning
3176 * @offset: offset in the module (expressed in 4 KB from module's beginning)
3177 * @length: length of the section to be erased (expressed in 4 KB)
3178 * @last_command: tells if this is the last command in a series
3179 * @cmd_details: pointer to command details structure or NULL
3180 *
3181 * Erase the NVM sector using the admin queue commands
3182 **/
3183 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3184 u32 offset, u16 length, bool last_command,
3185 struct i40e_asq_cmd_details *cmd_details)
3186 {
3187 struct i40e_aq_desc desc;
3188 struct i40e_aqc_nvm_update *cmd =
3189 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3190 i40e_status status;
3191
3192 /* The highest byte of the offset must be zero. */
3193 if (offset & 0xFF000000) {
3194 status = I40E_ERR_PARAM;
3195 goto i40e_aq_erase_nvm_exit;
3196 }
3197
3198 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3199
3200 /* If this is the last command in a series, set the proper flag. */
3201 if (last_command)
3202 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3203 cmd->module_pointer = module_pointer;
3204 cmd->offset = cpu_to_le32(offset);
3205 cmd->length = cpu_to_le16(length);
3206
3207 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3208
3209 i40e_aq_erase_nvm_exit:
3210 return status;
3211 }
3212
3213 /**
3214 * i40e_parse_discover_capabilities
3215 * @hw: pointer to the hw struct
3216 * @buff: pointer to a buffer containing device/function capability records
3217 * @cap_count: number of capability records in the list
3218 * @list_type_opc: type of capabilities list to parse
3219 *
3220 * Parse the device/function capabilities list.
3221 **/
3222 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3223 u32 cap_count,
3224 enum i40e_admin_queue_opc list_type_opc)
3225 {
3226 struct i40e_aqc_list_capabilities_element_resp *cap;
3227 u32 valid_functions, num_functions;
3228 u32 number, logical_id, phys_id;
3229 struct i40e_hw_capabilities *p;
3230 u16 id, ocp_cfg_word0;
3231 i40e_status status;
3232 u8 major_rev;
3233 u32 i = 0;
3234
3235 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3236
3237 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3238 p = &hw->dev_caps;
3239 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3240 p = &hw->func_caps;
3241 else
3242 return;
3243
3244 for (i = 0; i < cap_count; i++, cap++) {
3245 id = le16_to_cpu(cap->id);
3246 number = le32_to_cpu(cap->number);
3247 logical_id = le32_to_cpu(cap->logical_id);
3248 phys_id = le32_to_cpu(cap->phys_id);
3249 major_rev = cap->major_rev;
3250
3251 switch (id) {
3252 case I40E_AQ_CAP_ID_SWITCH_MODE:
3253 p->switch_mode = number;
3254 break;
3255 case I40E_AQ_CAP_ID_MNG_MODE:
3256 p->management_mode = number;
3257 if (major_rev > 1) {
3258 p->mng_protocols_over_mctp = logical_id;
3259 i40e_debug(hw, I40E_DEBUG_INIT,
3260 "HW Capability: Protocols over MCTP = %d\n",
3261 p->mng_protocols_over_mctp);
3262 } else {
3263 p->mng_protocols_over_mctp = 0;
3264 }
3265 break;
3266 case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3267 p->npar_enable = number;
3268 break;
3269 case I40E_AQ_CAP_ID_OS2BMC_CAP:
3270 p->os2bmc = number;
3271 break;
3272 case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3273 p->valid_functions = number;
3274 break;
3275 case I40E_AQ_CAP_ID_SRIOV:
3276 if (number == 1)
3277 p->sr_iov_1_1 = true;
3278 break;
3279 case I40E_AQ_CAP_ID_VF:
3280 p->num_vfs = number;
3281 p->vf_base_id = logical_id;
3282 break;
3283 case I40E_AQ_CAP_ID_VMDQ:
3284 if (number == 1)
3285 p->vmdq = true;
3286 break;
3287 case I40E_AQ_CAP_ID_8021QBG:
3288 if (number == 1)
3289 p->evb_802_1_qbg = true;
3290 break;
3291 case I40E_AQ_CAP_ID_8021QBR:
3292 if (number == 1)
3293 p->evb_802_1_qbh = true;
3294 break;
3295 case I40E_AQ_CAP_ID_VSI:
3296 p->num_vsis = number;
3297 break;
3298 case I40E_AQ_CAP_ID_DCB:
3299 if (number == 1) {
3300 p->dcb = true;
3301 p->enabled_tcmap = logical_id;
3302 p->maxtc = phys_id;
3303 }
3304 break;
3305 case I40E_AQ_CAP_ID_FCOE:
3306 if (number == 1)
3307 p->fcoe = true;
3308 break;
3309 case I40E_AQ_CAP_ID_ISCSI:
3310 if (number == 1)
3311 p->iscsi = true;
3312 break;
3313 case I40E_AQ_CAP_ID_RSS:
3314 p->rss = true;
3315 p->rss_table_size = number;
3316 p->rss_table_entry_width = logical_id;
3317 break;
3318 case I40E_AQ_CAP_ID_RXQ:
3319 p->num_rx_qp = number;
3320 p->base_queue = phys_id;
3321 break;
3322 case I40E_AQ_CAP_ID_TXQ:
3323 p->num_tx_qp = number;
3324 p->base_queue = phys_id;
3325 break;
3326 case I40E_AQ_CAP_ID_MSIX:
3327 p->num_msix_vectors = number;
3328 i40e_debug(hw, I40E_DEBUG_INIT,
3329 "HW Capability: MSIX vector count = %d\n",
3330 p->num_msix_vectors);
3331 break;
3332 case I40E_AQ_CAP_ID_VF_MSIX:
3333 p->num_msix_vectors_vf = number;
3334 break;
3335 case I40E_AQ_CAP_ID_FLEX10:
3336 if (major_rev == 1) {
3337 if (number == 1) {
3338 p->flex10_enable = true;
3339 p->flex10_capable = true;
3340 }
3341 } else {
3342 /* Capability revision >= 2 */
3343 if (number & 1)
3344 p->flex10_enable = true;
3345 if (number & 2)
3346 p->flex10_capable = true;
3347 }
3348 p->flex10_mode = logical_id;
3349 p->flex10_status = phys_id;
3350 break;
3351 case I40E_AQ_CAP_ID_CEM:
3352 if (number == 1)
3353 p->mgmt_cem = true;
3354 break;
3355 case I40E_AQ_CAP_ID_IWARP:
3356 if (number == 1)
3357 p->iwarp = true;
3358 break;
3359 case I40E_AQ_CAP_ID_LED:
3360 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3361 p->led[phys_id] = true;
3362 break;
3363 case I40E_AQ_CAP_ID_SDP:
3364 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3365 p->sdp[phys_id] = true;
3366 break;
3367 case I40E_AQ_CAP_ID_MDIO:
3368 if (number == 1) {
3369 p->mdio_port_num = phys_id;
3370 p->mdio_port_mode = logical_id;
3371 }
3372 break;
3373 case I40E_AQ_CAP_ID_1588:
3374 if (number == 1)
3375 p->ieee_1588 = true;
3376 break;
3377 case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3378 p->fd = true;
3379 p->fd_filters_guaranteed = number;
3380 p->fd_filters_best_effort = logical_id;
3381 break;
3382 case I40E_AQ_CAP_ID_WSR_PROT:
3383 p->wr_csr_prot = (u64)number;
3384 p->wr_csr_prot |= (u64)logical_id << 32;
3385 break;
3386 case I40E_AQ_CAP_ID_NVM_MGMT:
3387 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3388 p->sec_rev_disabled = true;
3389 if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3390 p->update_disabled = true;
3391 break;
3392 default:
3393 break;
3394 }
3395 }
3396
3397 if (p->fcoe)
3398 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3399
3400 /* Software override ensuring FCoE is disabled when NPAR or MFP
3401 * mode is enabled, because it is not supported in these modes.
3402 */
3403 if (p->npar_enable || p->flex10_enable)
3404 p->fcoe = false;
3405
3406 /* count the enabled ports (aka the "not disabled" ports) */
3407 hw->num_ports = 0;
3408 for (i = 0; i < 4; i++) {
3409 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3410 u64 port_cfg = 0;
3411
3412 /* use AQ read to get the physical register offset instead
3413 * of the port relative offset
3414 */
3415 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3416 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3417 hw->num_ports++;
3418 }
3419
3420 /* OCP cards case: if a mezz is removed, the Ethernet port is left in
3421 * the disabled state in the PRTGEN_CNF register. An additional NVM read
3422 * is needed to check whether we are dealing with an OCP card.
3423 * Those cards have at least 4 PFs, so using PRTGEN_CNF to count
3424 * physical ports results in a wrong partition id calculation and thus
3425 * breaks WoL support.
3426 */
3427 if (hw->mac.type == I40E_MAC_X722) {
3428 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3429 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3430 2 * I40E_SR_OCP_CFG_WORD0,
3431 sizeof(ocp_cfg_word0),
3432 &ocp_cfg_word0, true, NULL);
3433 if (!status &&
3434 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3435 hw->num_ports = 4;
3436 i40e_release_nvm(hw);
3437 }
3438 }
3439
3440 valid_functions = p->valid_functions;
3441 num_functions = 0;
3442 while (valid_functions) {
3443 if (valid_functions & 1)
3444 num_functions++;
3445 valid_functions >>= 1;
3446 }
3447
3448 /* partition id is 1-based, and functions are evenly spread
3449 * across the ports as partitions
3450 */
3451 if (hw->num_ports != 0) {
3452 hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3453 hw->num_partitions = num_functions / hw->num_ports;
3454 }
3455
3456 /* additional HW specific goodies that might
3457 * someday be HW version specific
3458 */
3459 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3460 }
3461
3462 /**
3463 * i40e_aq_discover_capabilities
3464 * @hw: pointer to the hw struct
3465 * @buff: a virtual buffer to hold the capabilities
3466 * @buff_size: Size of the virtual buffer
3467 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3468 * @list_type_opc: capabilities type to discover - pass in the command opcode
3469 * @cmd_details: pointer to command details structure or NULL
3470 *
3471 * Get the device capabilities descriptions from the firmware
3472 **/
3473 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
3474 void *buff, u16 buff_size, u16 *data_size,
3475 enum i40e_admin_queue_opc list_type_opc,
3476 struct i40e_asq_cmd_details *cmd_details)
3477 {
3478 struct i40e_aqc_list_capabilites *cmd;
3479 struct i40e_aq_desc desc;
3480 i40e_status status = 0;
3481
3482 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3483
3484 if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3485 list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3486 status = I40E_ERR_PARAM;
3487 goto exit;
3488 }
3489
3490 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3491
3492 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3493 if (buff_size > I40E_AQ_LARGE_BUF)
3494 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3495
3496 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3497 *data_size = le16_to_cpu(desc.datalen);
3498
3499 if (status)
3500 goto exit;
3501
3502 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3503 list_type_opc);
3504
3505 exit:
3506 return status;
3507 }
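
/*
 * Illustrative sketch (not part of the driver): the usual two-pass pattern
 * for capability discovery.  A first call with a guessed buffer size may
 * fail with ENOMEM, in which case @data_size reports the size the firmware
 * actually needs and the call is repeated with a bigger buffer.  The wrapper
 * name, the initial guess of 40 records and the use of kzalloc() (assumed to
 * be available through i40e.h) are illustration-only assumptions; the real
 * flow in the driver is roughly what i40e_get_capabilities() does.
 */
static i40e_status i40e_example_get_func_caps(struct i40e_hw *hw)
{
        u16 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
        i40e_status status;
        u16 needed = 0;
        void *buf;

        do {
                buf = kzalloc(buf_len, GFP_KERNEL);
                if (!buf)
                        return I40E_ERR_NO_MEMORY;

                status = i40e_aq_discover_capabilities(hw, buf, buf_len,
                                                       &needed,
                                                       i40e_aqc_opc_list_func_capabilities,
                                                       NULL);
                kfree(buf);

                if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM)
                        buf_len = needed;       /* retry with the reported size */
                else if (status)
                        return status;
        } while (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM);

        return status;
}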
3508
3509 /**
3510 * i40e_aq_update_nvm
3511 * @hw: pointer to the hw struct
3512 * @module_pointer: module pointer location in words from the NVM beginning
3513 * @offset: byte offset from the module beginning
3514 * @length: length of the section to be written (in bytes from the offset)
3515 * @data: command buffer (size [bytes] = length)
3516 * @last_command: tells if this is the last command in a series
3517 * @preservation_flags: Preservation mode flags
3518 * @cmd_details: pointer to command details structure or NULL
3519 *
3520 * Update the NVM using the admin queue commands
3521 **/
3522 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3523 u32 offset, u16 length, void *data,
3524 bool last_command, u8 preservation_flags,
3525 struct i40e_asq_cmd_details *cmd_details)
3526 {
3527 struct i40e_aq_desc desc;
3528 struct i40e_aqc_nvm_update *cmd =
3529 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3530 i40e_status status;
3531
3532 /* The highest byte of the offset must be zero. */
3533 if (offset & 0xFF000000) {
3534 status = I40E_ERR_PARAM;
3535 goto i40e_aq_update_nvm_exit;
3536 }
3537
3538 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3539
3540 /* If this is the last command in a series, set the proper flag. */
3541 if (last_command)
3542 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3543 if (hw->mac.type == I40E_MAC_X722) {
3544 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3545 cmd->command_flags |=
3546 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3547 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3548 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3549 cmd->command_flags |=
3550 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3551 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3552 }
3553 cmd->module_pointer = module_pointer;
3554 cmd->offset = cpu_to_le32(offset);
3555 cmd->length = cpu_to_le16(length);
3556
3557 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3558 if (length > I40E_AQ_LARGE_BUF)
3559 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3560
3561 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3562
3563 i40e_aq_update_nvm_exit:
3564 return status;
3565 }
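
/*
 * Illustrative sketch (not part of the driver): writing a larger region as a
 * series of AQ updates, setting I40E_AQ_NVM_LAST_CMD only on the final chunk
 * so the firmware knows when the series ends.  The 4 KB chunk size, the
 * preservation flag choice and the wrapper name are assumptions for this
 * example; a real update flow also has to own the NVM resource for write
 * (i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE)) and lives in i40e_nvm.c.
 */
static i40e_status i40e_example_update_nvm_buf(struct i40e_hw *hw, u32 offset,
                                               u8 *buf, u16 len)
{
        i40e_status status = 0;
        u16 chunk;

        while (len) {
                chunk = min_t(u16, len, 4096);
                status = i40e_aq_update_nvm(hw, 0, offset, chunk, buf,
                                            len == chunk /* last command */,
                                            I40E_NVM_PRESERVATION_FLAGS_ALL,
                                            NULL);
                if (status)
                        break;
                offset += chunk;
                buf += chunk;
                len -= chunk;
        }

        return status;
}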
3566
3567 /**
3568 * i40e_aq_rearrange_nvm
3569 * @hw: pointer to the hw struct
3570 * @rearrange_nvm: defines direction of rearrangement
3571 * @cmd_details: pointer to command details structure or NULL
3572 *
3573 * Rearrange NVM structure, available only for transition FW
3574 **/
3575 i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3576 u8 rearrange_nvm,
3577 struct i40e_asq_cmd_details *cmd_details)
3578 {
3579 struct i40e_aqc_nvm_update *cmd;
3580 i40e_status status;
3581 struct i40e_aq_desc desc;
3582
3583 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3584
3585 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3586
3587 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3588 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3589
3590 if (!rearrange_nvm) {
3591 status = I40E_ERR_PARAM;
3592 goto i40e_aq_rearrange_nvm_exit;
3593 }
3594
3595 cmd->command_flags |= rearrange_nvm;
3596 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3597
3598 i40e_aq_rearrange_nvm_exit:
3599 return status;
3600 }
3601
3602 /**
3603 * i40e_aq_get_lldp_mib
3604 * @hw: pointer to the hw struct
3605 * @bridge_type: type of bridge requested
3606 * @mib_type: Local, Remote or both Local and Remote MIBs
3607 * @buff: pointer to a user supplied buffer to store the MIB block
3608 * @buff_size: size of the buffer (in bytes)
3609 * @local_len: length of the returned Local LLDP MIB
3610 * @remote_len: length of the returned Remote LLDP MIB
3611 * @cmd_details: pointer to command details structure or NULL
3612 *
3613 * Requests the complete LLDP MIB (entire packet).
3614 **/
3615 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3616 u8 mib_type, void *buff, u16 buff_size,
3617 u16 *local_len, u16 *remote_len,
3618 struct i40e_asq_cmd_details *cmd_details)
3619 {
3620 struct i40e_aq_desc desc;
3621 struct i40e_aqc_lldp_get_mib *cmd =
3622 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3623 struct i40e_aqc_lldp_get_mib *resp =
3624 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3625 i40e_status status;
3626
3627 if (buff_size == 0 || !buff)
3628 return I40E_ERR_PARAM;
3629
3630 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3631 /* Indirect Command */
3632 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3633
3634 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3635 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
3636 I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
3637
3638 desc.datalen = cpu_to_le16(buff_size);
3639
3640 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3641 if (buff_size > I40E_AQ_LARGE_BUF)
3642 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3643
3644 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3645 if (!status) {
3646 if (local_len != NULL)
3647 *local_len = le16_to_cpu(resp->local_len);
3648 if (remote_len != NULL)
3649 *remote_len = le16_to_cpu(resp->remote_len);
3650 }
3651
3652 return status;
3653 }
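
/*
 * Illustrative sketch (not part of the driver): fetching the locally
 * administered LLDP MIB from the nearest-bridge agent.  The caller supplies
 * the buffer; the remote length pointer is left NULL since only the local
 * MIB length is of interest here.  The wrapper name is an assumption for
 * this example; the DCBX code in i40e_dcb.c consumes the MIB the same way.
 */
static i40e_status i40e_example_get_local_mib(struct i40e_hw *hw, void *buf,
                                              u16 buf_size, u16 *mib_len)
{
        return i40e_aq_get_lldp_mib(hw,
                                    I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
                                    I40E_AQ_LLDP_MIB_LOCAL,
                                    buf, buf_size, mib_len, NULL, NULL);
}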
3654
3655 /**
3656 * i40e_aq_set_lldp_mib - Set the LLDP MIB
3657 * @hw: pointer to the hw struct
3658 * @mib_type: Local, Remote or both Local and Remote MIBs
3659 * @buff: pointer to a user supplied buffer to store the MIB block
3660 * @buff_size: size of the buffer (in bytes)
3661 * @cmd_details: pointer to command details structure or NULL
3662 *
3663 * Set the LLDP MIB.
3664 **/
3665 enum i40e_status_code
3666 i40e_aq_set_lldp_mib(struct i40e_hw *hw,
3667 u8 mib_type, void *buff, u16 buff_size,
3668 struct i40e_asq_cmd_details *cmd_details)
3669 {
3670 struct i40e_aqc_lldp_set_local_mib *cmd;
3671 enum i40e_status_code status;
3672 struct i40e_aq_desc desc;
3673
3674 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
3675 if (buff_size == 0 || !buff)
3676 return I40E_ERR_PARAM;
3677
3678 i40e_fill_default_direct_cmd_desc(&desc,
3679 i40e_aqc_opc_lldp_set_local_mib);
3680 /* Indirect Command */
3681 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3682 if (buff_size > I40E_AQ_LARGE_BUF)
3683 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3684 desc.datalen = cpu_to_le16(buff_size);
3685
3686 cmd->type = mib_type;
3687 cmd->length = cpu_to_le16(buff_size);
3688 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff));
3689 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff));
3690
3691 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3692 return status;
3693 }
3694
3695 /**
3696 * i40e_aq_cfg_lldp_mib_change_event
3697 * @hw: pointer to the hw struct
3698 * @enable_update: Enable or Disable event posting
3699 * @cmd_details: pointer to command details structure or NULL
3700 *
3701 * Enable or Disable posting of an event on ARQ when LLDP MIB
3702 * associated with the interface changes
3703 **/
3704 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3705 bool enable_update,
3706 struct i40e_asq_cmd_details *cmd_details)
3707 {
3708 struct i40e_aq_desc desc;
3709 struct i40e_aqc_lldp_update_mib *cmd =
3710 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3711 i40e_status status;
3712
3713 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3714
3715 if (!enable_update)
3716 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3717
3718 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3719
3720 return status;
3721 }
3722
3723 /**
3724 * i40e_aq_restore_lldp
3725 * @hw: pointer to the hw struct
3726 * @setting: pointer to factory setting variable or NULL
3727 * @restore: True if factory settings should be restored
3728 * @cmd_details: pointer to command details structure or NULL
3729 *
3730 * Restore the LLDP Agent factory settings if @restore is set to True.
3731 * Otherwise, only return the factory setting in the AQ response.
3732 **/
3733 enum i40e_status_code
3734 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3735 struct i40e_asq_cmd_details *cmd_details)
3736 {
3737 struct i40e_aq_desc desc;
3738 struct i40e_aqc_lldp_restore *cmd =
3739 (struct i40e_aqc_lldp_restore *)&desc.params.raw;
3740 i40e_status status;
3741
3742 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
3743 i40e_debug(hw, I40E_DEBUG_ALL,
3744 "Restore LLDP not supported by current FW version.\n");
3745 return I40E_ERR_DEVICE_NOT_SUPPORTED;
3746 }
3747
3748 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3749
3750 if (restore)
3751 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3752
3753 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3754
3755 if (setting)
3756 *setting = cmd->command & 1;
3757
3758 return status;
3759 }
3760
3761 /**
3762 * i40e_aq_stop_lldp
3763 * @hw: pointer to the hw struct
3764 * @shutdown_agent: True if LLDP Agent needs to be Shutdown
3765 * @persist: True if stop of LLDP should be persistent across power cycles
3766 * @cmd_details: pointer to command details structure or NULL
3767 *
3768 * Stop or Shutdown the embedded LLDP Agent
3769 **/
3770 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3771 bool persist,
3772 struct i40e_asq_cmd_details *cmd_details)
3773 {
3774 struct i40e_aq_desc desc;
3775 struct i40e_aqc_lldp_stop *cmd =
3776 (struct i40e_aqc_lldp_stop *)&desc.params.raw;
3777 i40e_status status;
3778
3779 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3780
3781 if (shutdown_agent)
3782 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3783
3784 if (persist) {
3785 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3786 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3787 else
3788 i40e_debug(hw, I40E_DEBUG_ALL,
3789 "Persistent Stop LLDP not supported by current FW version.\n");
3790 }
3791
3792 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3793
3794 return status;
3795 }
3796
3797 /**
3798 * i40e_aq_start_lldp
3799 * @hw: pointer to the hw struct
3800 * @persist: True if start of LLDP should be persistent across power cycles
3801 * @cmd_details: pointer to command details structure or NULL
3802 *
3803 * Start the embedded LLDP Agent on all ports.
3804 **/
3805 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3806 struct i40e_asq_cmd_details *cmd_details)
3807 {
3808 struct i40e_aq_desc desc;
3809 struct i40e_aqc_lldp_start *cmd =
3810 (struct i40e_aqc_lldp_start *)&desc.params.raw;
3811 i40e_status status;
3812
3813 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3814
3815 cmd->command = I40E_AQ_LLDP_AGENT_START;
3816
3817 if (persist) {
3818 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3819 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3820 else
3821 i40e_debug(hw, I40E_DEBUG_ALL,
3822 "Persistent Start LLDP not supported by current FW version.\n");
3823 }
3824
3825 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3826
3827 return status;
3828 }
3829
3830 /**
3831 * i40e_aq_set_dcb_parameters
3832 * @hw: pointer to the hw struct
3833 * @cmd_details: pointer to command details structure or NULL
3834 * @dcb_enable: True if DCB configuration needs to be applied
3835 * Tell the firmware LLDP agent whether the DCB configuration should be applied.
3836 **/
3837 enum i40e_status_code
3838 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3839 struct i40e_asq_cmd_details *cmd_details)
3840 {
3841 struct i40e_aq_desc desc;
3842 struct i40e_aqc_set_dcb_parameters *cmd =
3843 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3844 i40e_status status;
3845
3846 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
3847 return I40E_ERR_DEVICE_NOT_SUPPORTED;
3848
3849 i40e_fill_default_direct_cmd_desc(&desc,
3850 i40e_aqc_opc_set_dcb_parameters);
3851
3852 if (dcb_enable) {
3853 cmd->valid_flags = I40E_DCB_VALID;
3854 cmd->command = I40E_AQ_DCB_SET_AGENT;
3855 }
3856 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3857
3858 return status;
3859 }
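
/*
 * Illustrative sketch (not part of the driver): handing DCB handling back to
 * the firmware by restarting the LLDP agent and asking it to apply the DCB
 * configuration again.  This loosely mirrors the ethtool private-flag path
 * in the driver; the wrapper name and the non-persistent start are
 * assumptions for this example.
 */
static void i40e_example_enable_fw_dcb(struct i40e_hw *hw)
{
        /* persist = false: the start does not survive power cycles */
        i40e_aq_start_lldp(hw, false, NULL);

        /* only succeeds when the FW LLDP agent is stoppable */
        if (i40e_aq_set_dcb_parameters(hw, true, NULL))
                i40e_debug(hw, I40E_DEBUG_ALL,
                           "Failed to re-apply DCB parameters\n");
}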
3860
3861 /**
3862 * i40e_aq_get_cee_dcb_config
3863 * @hw: pointer to the hw struct
3864 * @buff: response buffer that stores CEE operational configuration
3865 * @buff_size: size of the buffer passed
3866 * @cmd_details: pointer to command details structure or NULL
3867 *
3868 * Get CEE DCBX mode operational configuration from firmware
3869 **/
3870 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3871 void *buff, u16 buff_size,
3872 struct i40e_asq_cmd_details *cmd_details)
3873 {
3874 struct i40e_aq_desc desc;
3875 i40e_status status;
3876
3877 if (buff_size == 0 || !buff)
3878 return I40E_ERR_PARAM;
3879
3880 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3881
3882 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3883 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3884 cmd_details);
3885
3886 return status;
3887 }
3888
3889 /**
3890 * i40e_aq_add_udp_tunnel
3891 * @hw: pointer to the hw struct
3892 * @udp_port: the UDP port to add in Host byte order
3893 * @protocol_index: protocol index type
3894 * @filter_index: pointer to filter index
3895 * @cmd_details: pointer to command details structure or NULL
3896 *
3897 * Note: Firmware expects the udp_port value to be in Little Endian format,
3898 * and this function will call cpu_to_le16 to convert from Host byte order to
3899 * Little Endian order.
3900 **/
3901 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3902 u16 udp_port, u8 protocol_index,
3903 u8 *filter_index,
3904 struct i40e_asq_cmd_details *cmd_details)
3905 {
3906 struct i40e_aq_desc desc;
3907 struct i40e_aqc_add_udp_tunnel *cmd =
3908 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3909 struct i40e_aqc_del_udp_tunnel_completion *resp =
3910 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3911 i40e_status status;
3912
3913 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3914
3915 cmd->udp_port = cpu_to_le16(udp_port);
3916 cmd->protocol_type = protocol_index;
3917
3918 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3919
3920 if (!status && filter_index)
3921 *filter_index = resp->index;
3922
3923 return status;
3924 }
3925
3926 /**
3927 * i40e_aq_del_udp_tunnel
3928 * @hw: pointer to the hw struct
3929 * @index: filter index
3930 * @cmd_details: pointer to command details structure or NULL
3931 **/
3932 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3933 struct i40e_asq_cmd_details *cmd_details)
3934 {
3935 struct i40e_aq_desc desc;
3936 struct i40e_aqc_remove_udp_tunnel *cmd =
3937 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3938 i40e_status status;
3939
3940 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3941
3942 cmd->index = index;
3943
3944 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3945
3946 return status;
3947 }
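
/*
 * Illustrative sketch (not part of the driver): offloading a VXLAN UDP port
 * and removing it again using the filter index returned by the add command.
 * @udp_port is passed in host byte order, as noted above.  The wrapper names
 * are assumptions for this example; the driver wires these commands up
 * through the udp_tunnel_nic callbacks in i40e_main.c.
 */
static i40e_status i40e_example_add_vxlan_port(struct i40e_hw *hw,
                                               u16 udp_port, u8 *filter_index)
{
        return i40e_aq_add_udp_tunnel(hw, udp_port,
                                      I40E_AQC_TUNNEL_TYPE_VXLAN,
                                      filter_index, NULL);
}

static i40e_status i40e_example_del_vxlan_port(struct i40e_hw *hw,
                                               u8 filter_index)
{
        return i40e_aq_del_udp_tunnel(hw, filter_index, NULL);
}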
3948
3949 /**
3950 * i40e_aq_delete_element - Delete switch element
3951 * @hw: pointer to the hw struct
3952 * @seid: the SEID to delete from the switch
3953 * @cmd_details: pointer to command details structure or NULL
3954 *
3955 * This deletes a switch element from the switch.
3956 **/
3957 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3958 struct i40e_asq_cmd_details *cmd_details)
3959 {
3960 struct i40e_aq_desc desc;
3961 struct i40e_aqc_switch_seid *cmd =
3962 (struct i40e_aqc_switch_seid *)&desc.params.raw;
3963 i40e_status status;
3964
3965 if (seid == 0)
3966 return I40E_ERR_PARAM;
3967
3968 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3969
3970 cmd->seid = cpu_to_le16(seid);
3971
3972 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
3973 cmd_details, true);
3974
3975 return status;
3976 }
3977
3978 /**
3979 * i40e_aq_dcb_updated - DCB Updated Command
3980 * @hw: pointer to the hw struct
3981 * @cmd_details: pointer to command details structure or NULL
3982 *
3983 * EMP will return when the shared RPB settings have been
3984 * recomputed and modified. The retval field in the descriptor
3985 * will be set to 0 when RPB is modified.
3986 **/
3987 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
3988 struct i40e_asq_cmd_details *cmd_details)
3989 {
3990 struct i40e_aq_desc desc;
3991 i40e_status status;
3992
3993 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
3994
3995 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3996
3997 return status;
3998 }
3999
4000 /**
4001 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
4002 * @hw: pointer to the hw struct
4003 * @seid: seid for the physical port/switching component/vsi
4004 * @buff: Indirect buffer to hold data parameters and response
4005 * @buff_size: Indirect buffer size
4006 * @opcode: Tx scheduler AQ command opcode
4007 * @cmd_details: pointer to command details structure or NULL
4008 *
4009 * Generic command handler for Tx scheduler AQ commands
4010 **/
4011 static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
4012 void *buff, u16 buff_size,
4013 enum i40e_admin_queue_opc opcode,
4014 struct i40e_asq_cmd_details *cmd_details)
4015 {
4016 struct i40e_aq_desc desc;
4017 struct i40e_aqc_tx_sched_ind *cmd =
4018 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
4019 i40e_status status;
4020 bool cmd_param_flag = false;
4021
4022 switch (opcode) {
4023 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
4024 case i40e_aqc_opc_configure_vsi_tc_bw:
4025 case i40e_aqc_opc_enable_switching_comp_ets:
4026 case i40e_aqc_opc_modify_switching_comp_ets:
4027 case i40e_aqc_opc_disable_switching_comp_ets:
4028 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
4029 case i40e_aqc_opc_configure_switching_comp_bw_config:
4030 cmd_param_flag = true;
4031 break;
4032 case i40e_aqc_opc_query_vsi_bw_config:
4033 case i40e_aqc_opc_query_vsi_ets_sla_config:
4034 case i40e_aqc_opc_query_switching_comp_ets_config:
4035 case i40e_aqc_opc_query_port_ets_config:
4036 case i40e_aqc_opc_query_switching_comp_bw_config:
4037 cmd_param_flag = false;
4038 break;
4039 default:
4040 return I40E_ERR_PARAM;
4041 }
4042
4043 i40e_fill_default_direct_cmd_desc(&desc, opcode);
4044
4045 /* Indirect command */
4046 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4047 if (cmd_param_flag)
4048 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4049 if (buff_size > I40E_AQ_LARGE_BUF)
4050 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4051
4052 desc.datalen = cpu_to_le16(buff_size);
4053
4054 cmd->vsi_seid = cpu_to_le16(seid);
4055
4056 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4057
4058 return status;
4059 }
4060
4061 /**
4062 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
4063 * @hw: pointer to the hw struct
4064 * @seid: VSI seid
4065 * @credit: BW limit credits (0 = disabled)
4066 * @max_credit: Max BW limit credits
4067 * @cmd_details: pointer to command details structure or NULL
4068 **/
4069 i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
4070 u16 seid, u16 credit, u8 max_credit,
4071 struct i40e_asq_cmd_details *cmd_details)
4072 {
4073 struct i40e_aq_desc desc;
4074 struct i40e_aqc_configure_vsi_bw_limit *cmd =
4075 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
4076 i40e_status status;
4077
4078 i40e_fill_default_direct_cmd_desc(&desc,
4079 i40e_aqc_opc_configure_vsi_bw_limit);
4080
4081 cmd->vsi_seid = cpu_to_le16(seid);
4082 cmd->credit = cpu_to_le16(credit);
4083 cmd->max_credit = max_credit;
4084
4085 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4086
4087 return status;
4088 }
4089
4090 /**
4091 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
4092 * @hw: pointer to the hw struct
4093 * @seid: VSI seid
4094 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
4095 * @cmd_details: pointer to command details structure or NULL
4096 **/
4097 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
4098 u16 seid,
4099 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
4100 struct i40e_asq_cmd_details *cmd_details)
4101 {
4102 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4103 i40e_aqc_opc_configure_vsi_tc_bw,
4104 cmd_details);
4105 }
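
/*
 * Illustrative sketch (not part of the driver): giving every enabled TC an
 * equal relative bandwidth share on a VSI.  The field usage follows the
 * i40e_aqc_configure_vsi_tc_bw_data layout consumed by the command above;
 * the wrapper name and the equal-share policy are assumptions for this
 * example (the driver's i40e_vsi_configure_bw_alloc() is the real thing).
 */
static i40e_status i40e_example_even_tc_bw(struct i40e_hw *hw, u16 vsi_seid,
                                           u8 enabled_tc)
{
        struct i40e_aqc_configure_vsi_tc_bw_data bw_data = {0};
        int i;

        bw_data.tc_valid_bits = enabled_tc;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                if (enabled_tc & BIT(i))
                        bw_data.tc_bw_credits[i] = 1;   /* equal shares */

        return i40e_aq_config_vsi_tc_bw(hw, vsi_seid, &bw_data, NULL);
}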
4106
4107 /**
4108 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
4109 * @hw: pointer to the hw struct
4110 * @seid: seid of the switching component connected to Physical Port
4111 * @ets_data: Buffer holding ETS parameters
4112 * @opcode: Tx scheduler AQ command opcode
4113 * @cmd_details: pointer to command details structure or NULL
4114 **/
4115 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
4116 u16 seid,
4117 struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
4118 enum i40e_admin_queue_opc opcode,
4119 struct i40e_asq_cmd_details *cmd_details)
4120 {
4121 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
4122 sizeof(*ets_data), opcode, cmd_details);
4123 }
4124
4125 /**
4126 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
4127 * @hw: pointer to the hw struct
4128 * @seid: seid of the switching component
4129 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
4130 * @cmd_details: pointer to command details structure or NULL
4131 **/
4132 i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
4133 u16 seid,
4134 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
4135 struct i40e_asq_cmd_details *cmd_details)
4136 {
4137 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4138 i40e_aqc_opc_configure_switching_comp_bw_config,
4139 cmd_details);
4140 }
4141
4142 /**
4143 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
4144 * @hw: pointer to the hw struct
4145 * @seid: seid of the VSI
4146 * @bw_data: Buffer to hold VSI BW configuration
4147 * @cmd_details: pointer to command details structure or NULL
4148 **/
4149 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
4150 u16 seid,
4151 struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
4152 struct i40e_asq_cmd_details *cmd_details)
4153 {
4154 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4155 i40e_aqc_opc_query_vsi_bw_config,
4156 cmd_details);
4157 }
4158
4159 /**
4160 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4161 * @hw: pointer to the hw struct
4162 * @seid: seid of the VSI
4163 * @bw_data: Buffer to hold VSI BW configuration per TC
4164 * @cmd_details: pointer to command details structure or NULL
4165 **/
4166 i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4167 u16 seid,
4168 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4169 struct i40e_asq_cmd_details *cmd_details)
4170 {
4171 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4172 i40e_aqc_opc_query_vsi_ets_sla_config,
4173 cmd_details);
4174 }
4175
4176 /**
4177 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4178 * @hw: pointer to the hw struct
4179 * @seid: seid of the switching component
4180 * @bw_data: Buffer to hold switching component's per TC BW config
4181 * @cmd_details: pointer to command details structure or NULL
4182 **/
4183 i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4184 u16 seid,
4185 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4186 struct i40e_asq_cmd_details *cmd_details)
4187 {
4188 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4189 i40e_aqc_opc_query_switching_comp_ets_config,
4190 cmd_details);
4191 }
4192
4193 /**
4194 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4195 * @hw: pointer to the hw struct
4196 * @seid: seid of the VSI or switching component connected to Physical Port
4197 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4198 * @cmd_details: pointer to command details structure or NULL
4199 **/
4200 i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4201 u16 seid,
4202 struct i40e_aqc_query_port_ets_config_resp *bw_data,
4203 struct i40e_asq_cmd_details *cmd_details)
4204 {
4205 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4206 i40e_aqc_opc_query_port_ets_config,
4207 cmd_details);
4208 }
4209
4210 /**
4211 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4212 * @hw: pointer to the hw struct
4213 * @seid: seid of the switching component
4214 * @bw_data: Buffer to hold switching component's BW configuration
4215 * @cmd_details: pointer to command details structure or NULL
4216 **/
4217 i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4218 u16 seid,
4219 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4220 struct i40e_asq_cmd_details *cmd_details)
4221 {
4222 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4223 i40e_aqc_opc_query_switching_comp_bw_config,
4224 cmd_details);
4225 }
4226
4227 /**
4228 * i40e_validate_filter_settings
4229 * @hw: pointer to the hardware structure
4230 * @settings: Filter control settings
4231 *
4232 * Check and validate the filter control settings passed.
4233 * The function checks for the valid filter/context sizes being
4234 * passed for FCoE and PE.
4235 *
4236 * Returns 0 if the values passed are valid and within range,
4237 * otherwise returns an error.
4238 **/
4239 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
4240 struct i40e_filter_control_settings *settings)
4241 {
4242 u32 fcoe_cntx_size, fcoe_filt_size;
4243 u32 fcoe_fmax;
4244 u32 val;
4245
4246 /* Validate FCoE settings passed */
4247 switch (settings->fcoe_filt_num) {
4248 case I40E_HASH_FILTER_SIZE_1K:
4249 case I40E_HASH_FILTER_SIZE_2K:
4250 case I40E_HASH_FILTER_SIZE_4K:
4251 case I40E_HASH_FILTER_SIZE_8K:
4252 case I40E_HASH_FILTER_SIZE_16K:
4253 case I40E_HASH_FILTER_SIZE_32K:
4254 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4255 fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4256 break;
4257 default:
4258 return I40E_ERR_PARAM;
4259 }
4260
4261 switch (settings->fcoe_cntx_num) {
4262 case I40E_DMA_CNTX_SIZE_512:
4263 case I40E_DMA_CNTX_SIZE_1K:
4264 case I40E_DMA_CNTX_SIZE_2K:
4265 case I40E_DMA_CNTX_SIZE_4K:
4266 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4267 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4268 break;
4269 default:
4270 return I40E_ERR_PARAM;
4271 }
4272
4273 /* Validate PE settings passed */
4274 switch (settings->pe_filt_num) {
4275 case I40E_HASH_FILTER_SIZE_1K:
4276 case I40E_HASH_FILTER_SIZE_2K:
4277 case I40E_HASH_FILTER_SIZE_4K:
4278 case I40E_HASH_FILTER_SIZE_8K:
4279 case I40E_HASH_FILTER_SIZE_16K:
4280 case I40E_HASH_FILTER_SIZE_32K:
4281 case I40E_HASH_FILTER_SIZE_64K:
4282 case I40E_HASH_FILTER_SIZE_128K:
4283 case I40E_HASH_FILTER_SIZE_256K:
4284 case I40E_HASH_FILTER_SIZE_512K:
4285 case I40E_HASH_FILTER_SIZE_1M:
4286 break;
4287 default:
4288 return I40E_ERR_PARAM;
4289 }
4290
4291 switch (settings->pe_cntx_num) {
4292 case I40E_DMA_CNTX_SIZE_512:
4293 case I40E_DMA_CNTX_SIZE_1K:
4294 case I40E_DMA_CNTX_SIZE_2K:
4295 case I40E_DMA_CNTX_SIZE_4K:
4296 case I40E_DMA_CNTX_SIZE_8K:
4297 case I40E_DMA_CNTX_SIZE_16K:
4298 case I40E_DMA_CNTX_SIZE_32K:
4299 case I40E_DMA_CNTX_SIZE_64K:
4300 case I40E_DMA_CNTX_SIZE_128K:
4301 case I40E_DMA_CNTX_SIZE_256K:
4302 break;
4303 default:
4304 return I40E_ERR_PARAM;
4305 }
4306
4307 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4308 val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4309 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
4310 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
4311 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
4312 return I40E_ERR_INVALID_SIZE;
4313
4314 return 0;
4315 }
4316
4317 /**
4318 * i40e_set_filter_control
4319 * @hw: pointer to the hardware structure
4320 * @settings: Filter control settings
4321 *
4322 * Set the queue filters for PE/FCoE and enable the filters required
4323 * for a single PF. These settings are expected to be programmed at
4324 * driver initialization time.
4325 **/
4326 i40e_status i40e_set_filter_control(struct i40e_hw *hw,
4327 struct i40e_filter_control_settings *settings)
4328 {
4329 i40e_status ret = 0;
4330 u32 hash_lut_size = 0;
4331 u32 val;
4332
4333 if (!settings)
4334 return I40E_ERR_PARAM;
4335
4336 /* Validate the input settings */
4337 ret = i40e_validate_filter_settings(hw, settings);
4338 if (ret)
4339 return ret;
4340
4341 /* Read the PF Queue Filter control register */
4342 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4343
4344 /* Program required PE hash buckets for the PF */
4345 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4346 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
4347 I40E_PFQF_CTL_0_PEHSIZE_MASK;
4348 /* Program required PE contexts for the PF */
4349 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4350 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
4351 I40E_PFQF_CTL_0_PEDSIZE_MASK;
4352
4353 /* Program required FCoE hash buckets for the PF */
4354 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4355 val |= ((u32)settings->fcoe_filt_num <<
4356 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
4357 I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4358 /* Program required FCoE DDP contexts for the PF */
4359 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4360 val |= ((u32)settings->fcoe_cntx_num <<
4361 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
4362 I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4363
4364 /* Program Hash LUT size for the PF */
4365 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4366 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4367 hash_lut_size = 1;
4368 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
4369 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4370
4371 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4372 if (settings->enable_fdir)
4373 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4374 if (settings->enable_ethtype)
4375 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4376 if (settings->enable_macvlan)
4377 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4378
4379 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4380
4381 return 0;
4382 }
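
/*
 * Illustrative sketch (not part of the driver): a minimal filter-control
 * setup enabling the Flow Director, ethertype and MACVLAN filters with a
 * 128-entry hash LUT.  The PE/FCoE sizing fields are left zeroed (assumed
 * here to map to the smallest valid table sizes).  The wrapper name is an
 * assumption for this example; the driver programs this once from its
 * probe path.
 */
static i40e_status i40e_example_filter_control(struct i40e_hw *hw)
{
        struct i40e_filter_control_settings settings = {0};

        settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
        settings.enable_fdir = true;
        settings.enable_ethtype = true;
        settings.enable_macvlan = true;

        return i40e_set_filter_control(hw, &settings);
}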
4383
4384 /**
4385 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4386 * @hw: pointer to the hw struct
4387 * @mac_addr: MAC address to use in the filter
4388 * @ethtype: Ethertype to use in the filter
4389 * @flags: Flags that need to be applied to the filter
4390 * @vsi_seid: seid of the control VSI
4391 * @queue: VSI queue number to send the packet to
4392 * @is_add: Add control packet filter if True else remove
4393 * @stats: Structure to hold information on control filter counts
4394 * @cmd_details: pointer to command details structure or NULL
4395 *
4396 * This command adds or removes a control packet filter for a control VSI.
4397 * In return it updates the perfect filter counts in the stats member.
4399 **/
4400 i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4401 u8 *mac_addr, u16 ethtype, u16 flags,
4402 u16 vsi_seid, u16 queue, bool is_add,
4403 struct i40e_control_filter_stats *stats,
4404 struct i40e_asq_cmd_details *cmd_details)
4405 {
4406 struct i40e_aq_desc desc;
4407 struct i40e_aqc_add_remove_control_packet_filter *cmd =
4408 (struct i40e_aqc_add_remove_control_packet_filter *)
4409 &desc.params.raw;
4410 struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4411 (struct i40e_aqc_add_remove_control_packet_filter_completion *)
4412 &desc.params.raw;
4413 i40e_status status;
4414
4415 if (vsi_seid == 0)
4416 return I40E_ERR_PARAM;
4417
4418 if (is_add) {
4419 i40e_fill_default_direct_cmd_desc(&desc,
4420 i40e_aqc_opc_add_control_packet_filter);
4421 cmd->queue = cpu_to_le16(queue);
4422 } else {
4423 i40e_fill_default_direct_cmd_desc(&desc,
4424 i40e_aqc_opc_remove_control_packet_filter);
4425 }
4426
4427 if (mac_addr)
4428 ether_addr_copy(cmd->mac, mac_addr);
4429
4430 cmd->etype = cpu_to_le16(ethtype);
4431 cmd->flags = cpu_to_le16(flags);
4432 cmd->seid = cpu_to_le16(vsi_seid);
4433
4434 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4435
4436 if (!status && stats) {
4437 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4438 stats->etype_used = le16_to_cpu(resp->etype_used);
4439 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4440 stats->etype_free = le16_to_cpu(resp->etype_free);
4441 }
4442
4443 return status;
4444 }
4445
4446 /**
4447 * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
4448 * @hw: pointer to the hw struct
4449 * @seid: VSI seid to add ethertype filter from
4450 **/
4451 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4452 u16 seid)
4453 {
4454 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4455 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4456 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4457 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4458 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4459 i40e_status status;
4460
4461 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4462 seid, 0, true, NULL,
4463 NULL);
4464 if (status)
4465 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4466 }
4467
4468 /**
4469 * i40e_aq_alternate_read
4470 * @hw: pointer to the hardware structure
4471 * @reg_addr0: address of first dword to be read
4472 * @reg_val0: pointer for data read from 'reg_addr0'
4473 * @reg_addr1: address of second dword to be read
4474 * @reg_val1: pointer for data read from 'reg_addr1'
4475 *
4476 * Read one or two dwords from the alternate structure. The fields are
4477 * indicated by the 'reg_addr0' and 'reg_addr1' register numbers. If the
4478 * 'reg_val1' pointer is not passed, only the register at 'reg_addr0' is read.
4479 *
4480 **/
4481 static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
4482 u32 reg_addr0, u32 *reg_val0,
4483 u32 reg_addr1, u32 *reg_val1)
4484 {
4485 struct i40e_aq_desc desc;
4486 struct i40e_aqc_alternate_write *cmd_resp =
4487 (struct i40e_aqc_alternate_write *)&desc.params.raw;
4488 i40e_status status;
4489
4490 if (!reg_val0)
4491 return I40E_ERR_PARAM;
4492
4493 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4494 cmd_resp->address0 = cpu_to_le32(reg_addr0);
4495 cmd_resp->address1 = cpu_to_le32(reg_addr1);
4496
4497 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4498
4499 if (!status) {
4500 *reg_val0 = le32_to_cpu(cmd_resp->data0);
4501
4502 if (reg_val1)
4503 *reg_val1 = le32_to_cpu(cmd_resp->data1);
4504 }
4505
4506 return status;
4507 }
4508
4509 /**
4510 * i40e_aq_suspend_port_tx
4511 * @hw: pointer to the hardware structure
4512 * @seid: port seid
4513 * @cmd_details: pointer to command details structure or NULL
4514 *
4515 * Suspend port's Tx traffic
4516 **/
4517 i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
4518 struct i40e_asq_cmd_details *cmd_details)
4519 {
4520 struct i40e_aqc_tx_sched_ind *cmd;
4521 struct i40e_aq_desc desc;
4522 i40e_status status;
4523
4524 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
4525 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
4526 cmd->vsi_seid = cpu_to_le16(seid);
4527 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4528
4529 return status;
4530 }
4531
4532 /**
4533 * i40e_aq_resume_port_tx
4534 * @hw: pointer to the hardware structure
4535 * @cmd_details: pointer to command details structure or NULL
4536 *
4537 * Resume port's Tx traffic
4538 **/
4539 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
4540 struct i40e_asq_cmd_details *cmd_details)
4541 {
4542 struct i40e_aq_desc desc;
4543 i40e_status status;
4544
4545 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4546
4547 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4548
4549 return status;
4550 }
4551
4552 /**
4553 * i40e_set_pci_config_data - store PCI bus info
4554 * @hw: pointer to hardware structure
4555 * @link_status: the link status word from PCI config space
4556 *
4557 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4558 **/
4559 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4560 {
4561 hw->bus.type = i40e_bus_type_pci_express;
4562
4563 switch (link_status & PCI_EXP_LNKSTA_NLW) {
4564 case PCI_EXP_LNKSTA_NLW_X1:
4565 hw->bus.width = i40e_bus_width_pcie_x1;
4566 break;
4567 case PCI_EXP_LNKSTA_NLW_X2:
4568 hw->bus.width = i40e_bus_width_pcie_x2;
4569 break;
4570 case PCI_EXP_LNKSTA_NLW_X4:
4571 hw->bus.width = i40e_bus_width_pcie_x4;
4572 break;
4573 case PCI_EXP_LNKSTA_NLW_X8:
4574 hw->bus.width = i40e_bus_width_pcie_x8;
4575 break;
4576 default:
4577 hw->bus.width = i40e_bus_width_unknown;
4578 break;
4579 }
4580
4581 switch (link_status & PCI_EXP_LNKSTA_CLS) {
4582 case PCI_EXP_LNKSTA_CLS_2_5GB:
4583 hw->bus.speed = i40e_bus_speed_2500;
4584 break;
4585 case PCI_EXP_LNKSTA_CLS_5_0GB:
4586 hw->bus.speed = i40e_bus_speed_5000;
4587 break;
4588 case PCI_EXP_LNKSTA_CLS_8_0GB:
4589 hw->bus.speed = i40e_bus_speed_8000;
4590 break;
4591 default:
4592 hw->bus.speed = i40e_bus_speed_unknown;
4593 break;
4594 }
4595 }
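
/*
 * Illustrative sketch (not part of the driver): feeding the PCIe link status
 * word into i40e_set_pci_config_data(), which is essentially what the probe
 * path does.  The wrapper name is an assumption for this example; @pdev is
 * the function's PCI device.
 */
static void i40e_example_cache_bus_info(struct i40e_hw *hw,
                                        struct pci_dev *pdev)
{
        u16 link_status = 0;

        pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
        i40e_set_pci_config_data(hw, link_status);
}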
4596
4597 /**
4598 * i40e_aq_debug_dump
4599 * @hw: pointer to the hardware structure
4600 * @cluster_id: specific cluster to dump
4601 * @table_id: table id within cluster
4602 * @start_index: index of line in the block to read
4603 * @buff_size: dump buffer size
4604 * @buff: dump buffer
4605 * @ret_buff_size: actual buffer size returned
4606 * @ret_next_table: next block to read
4607 * @ret_next_index: next index to read
4608 * @cmd_details: pointer to command details structure or NULL
4609 *
4610 * Dump internal FW/HW data for debug purposes.
4611 *
4612 **/
4613 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4614 u8 table_id, u32 start_index, u16 buff_size,
4615 void *buff, u16 *ret_buff_size,
4616 u8 *ret_next_table, u32 *ret_next_index,
4617 struct i40e_asq_cmd_details *cmd_details)
4618 {
4619 struct i40e_aq_desc desc;
4620 struct i40e_aqc_debug_dump_internals *cmd =
4621 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4622 struct i40e_aqc_debug_dump_internals *resp =
4623 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4624 i40e_status status;
4625
4626 if (buff_size == 0 || !buff)
4627 return I40E_ERR_PARAM;
4628
4629 i40e_fill_default_direct_cmd_desc(&desc,
4630 i40e_aqc_opc_debug_dump_internals);
4631 /* Indirect Command */
4632 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4633 if (buff_size > I40E_AQ_LARGE_BUF)
4634 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4635
4636 cmd->cluster_id = cluster_id;
4637 cmd->table_id = table_id;
4638 cmd->idx = cpu_to_le32(start_index);
4639
4640 desc.datalen = cpu_to_le16(buff_size);
4641
4642 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4643 if (!status) {
4644 if (ret_buff_size)
4645 *ret_buff_size = le16_to_cpu(desc.datalen);
4646 if (ret_next_table)
4647 *ret_next_table = resp->table_id;
4648 if (ret_next_index)
4649 *ret_next_index = le32_to_cpu(resp->idx);
4650 }
4651
4652 return status;
4653 }
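
/*
 * Illustrative sketch (not part of the driver): dumping one block of a debug
 * cluster and reporting where the next call should continue, since the
 * returned next table/index act as a cursor for iterating a cluster.  The
 * wrapper name is an assumption for this example; debugfs code drives this
 * command in a similar way.
 */
static i40e_status i40e_example_dump_one_block(struct i40e_hw *hw,
                                               u8 cluster_id, u8 table_id,
                                               u32 index, void *buf,
                                               u16 buf_size)
{
        u8 next_table = 0;
        u32 next_index = 0;
        u16 ret_len = 0;
        i40e_status status;

        status = i40e_aq_debug_dump(hw, cluster_id, table_id, index, buf_size,
                                    buf, &ret_len, &next_table, &next_index,
                                    NULL);
        if (!status)
                i40e_debug(hw, I40E_DEBUG_ALL,
                           "dumped %u bytes, continue at table %u index %u\n",
                           ret_len, next_table, next_index);

        return status;
}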
4654
4655 /**
4656 * i40e_read_bw_from_alt_ram
4657 * @hw: pointer to the hardware structure
4658 * @max_bw: pointer for max_bw read
4659 * @min_bw: pointer for min_bw read
4660 * @min_valid: pointer for bool that is true if min_bw is a valid value
4661 * @max_valid: pointer for bool that is true if max_bw is a valid value
4662 *
4663 * Read the BW values from the alternate RAM for the given PF
4664 **/
4665 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4666 u32 *max_bw, u32 *min_bw,
4667 bool *min_valid, bool *max_valid)
4668 {
4669 i40e_status status;
4670 u32 max_bw_addr, min_bw_addr;
4671
4672 /* Calculate the address of the min/max bw registers */
4673 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4674 I40E_ALT_STRUCT_MAX_BW_OFFSET +
4675 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4676 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4677 I40E_ALT_STRUCT_MIN_BW_OFFSET +
4678 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4679
4680 /* Read the bandwidths from alt ram */
4681 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4682 min_bw_addr, min_bw);
4683
4684 if (*min_bw & I40E_ALT_BW_VALID_MASK)
4685 *min_valid = true;
4686 else
4687 *min_valid = false;
4688
4689 if (*max_bw & I40E_ALT_BW_VALID_MASK)
4690 *max_valid = true;
4691 else
4692 *max_valid = false;
4693
4694 return status;
4695 }
4696
4697 /**
4698 * i40e_aq_configure_partition_bw
4699 * @hw: pointer to the hardware structure
4700 * @bw_data: Buffer holding valid PFs and BW limits
4701 * @cmd_details: pointer to command details
4702 *
4703 * Configure partitions guaranteed/max bw
4704 **/
4705 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4706 struct i40e_aqc_configure_partition_bw_data *bw_data,
4707 struct i40e_asq_cmd_details *cmd_details)
4708 {
4709 i40e_status status;
4710 struct i40e_aq_desc desc;
4711 u16 bwd_size = sizeof(*bw_data);
4712
4713 i40e_fill_default_direct_cmd_desc(&desc,
4714 i40e_aqc_opc_configure_partition_bw);
4715
4716 /* Indirect command */
4717 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4718 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4719
4720 if (bwd_size > I40E_AQ_LARGE_BUF)
4721 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4722
4723 desc.datalen = cpu_to_le16(bwd_size);
4724
4725 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4726 cmd_details);
4727
4728 return status;
4729 }
4730
4731 /**
4732 * i40e_read_phy_register_clause22
4733 * @hw: pointer to the HW structure
4734 * @reg: register address in the page
4735 * @phy_addr: PHY address on MDIO interface
4736 * @value: PHY register value
4737 *
4738 * Reads specified PHY register value
4739 **/
4740 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
4741 u16 reg, u8 phy_addr, u16 *value)
4742 {
4743 i40e_status status = I40E_ERR_TIMEOUT;
4744 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4745 u32 command = 0;
4746 u16 retry = 1000;
4747
4748 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4749 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4750 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4751 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4752 (I40E_GLGEN_MSCA_MDICMD_MASK);
4753 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4754 do {
4755 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4756 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4757 status = 0;
4758 break;
4759 }
4760 udelay(10);
4761 retry--;
4762 } while (retry);
4763
4764 if (status) {
4765 i40e_debug(hw, I40E_DEBUG_PHY,
4766 "PHY: Can't write command to external PHY.\n");
4767 } else {
4768 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4769 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4770 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4771 }
4772
4773 return status;
4774 }
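/* Note on the MDIO flow above: the command word (register, PHY address,
 * opcode and ST code) is written to GLGEN_MSCA for the port's MDIO
 * interface, then the MDICMD bit is polled until hardware clears it; the
 * read data in GLGEN_MSRWD is only valid once that completion bit drops.
 */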
4775
4776 /**
4777 * i40e_write_phy_register_clause22
4778 * @hw: pointer to the HW structure
4779 * @reg: register address in the page
4780 * @phy_addr: PHY address on MDIO interface
4781 * @value: PHY register value
4782 *
4783 * Writes specified PHY register value
4784 **/
4785 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
4786 u16 reg, u8 phy_addr, u16 value)
4787 {
4788 i40e_status status = I40E_ERR_TIMEOUT;
4789 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4790 u32 command = 0;
4791 u16 retry = 1000;
4792
4793 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4794 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4795
4796 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4797 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4798 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4799 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4800 (I40E_GLGEN_MSCA_MDICMD_MASK);
4801
4802 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4803 do {
4804 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4805 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4806 status = 0;
4807 break;
4808 }
4809 udelay(10);
4810 retry--;
4811 } while (retry);
4812
4813 return status;
4814 }
4815
4816 /**
4817 * i40e_read_phy_register_clause45
4818 * @hw: pointer to the HW structure
4819 * @page: registers page number
4820 * @reg: register address in the page
4821 * @phy_addr: PHY address on MDIO interface
4822 * @value: PHY register value
4823 *
4824 * Reads specified PHY register value
4825 **/
4826 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
4827 u8 page, u16 reg, u8 phy_addr, u16 *value)
4828 {
4829 i40e_status status = I40E_ERR_TIMEOUT;
4830 u32 command = 0;
4831 u16 retry = 1000;
4832 u8 port_num = hw->func_caps.mdio_port_num;
4833
4834 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4835 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4836 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4837 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4838 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4839 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4840 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4841 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4842 do {
4843 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4844 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4845 status = 0;
4846 break;
4847 }
4848 usleep_range(10, 20);
4849 retry--;
4850 } while (retry);
4851
4852 if (status) {
4853 i40e_debug(hw, I40E_DEBUG_PHY,
4854 "PHY: Can't write command to external PHY.\n");
4855 goto phy_read_end;
4856 }
4857
4858 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4859 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4860 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4861 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4862 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4863 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4864 status = I40E_ERR_TIMEOUT;
4865 retry = 1000;
4866 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4867 do {
4868 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4869 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4870 status = 0;
4871 break;
4872 }
4873 usleep_range(10, 20);
4874 retry--;
4875 } while (retry);
4876
4877 if (!status) {
4878 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4879 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4880 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4881 } else {
4882 i40e_debug(hw, I40E_DEBUG_PHY,
4883 "PHY: Can't read register value from external PHY.\n");
4884 }
4885
4886 phy_read_end:
4887 return status;
4888 }
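/* Clause 45 access is a two-phase sequence: an ADDRESS cycle first latches
 * the register number (MDIADD field) for the given device/page, and only
 * then is the READ (or WRITE) opcode issued. Each phase is polled on the
 * MDICMD completion bit, which is why the helper above resets status and
 * the retry count between the two writes to GLGEN_MSCA.
 */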
4889
4890 /**
4891 * i40e_write_phy_register_clause45
4892 * @hw: pointer to the HW structure
4893 * @page: registers page number
4894 * @reg: register address in the page
4895 * @phy_addr: PHY address on MDIO interface
4896 * @value: PHY register value
4897 *
4898 * Writes value to specified PHY register
4899 **/
4900 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
4901 u8 page, u16 reg, u8 phy_addr, u16 value)
4902 {
4903 i40e_status status = I40E_ERR_TIMEOUT;
4904 u32 command = 0;
4905 u16 retry = 1000;
4906 u8 port_num = hw->func_caps.mdio_port_num;
4907
4908 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4909 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4910 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4911 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4912 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4913 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4914 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4915 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4916 do {
4917 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4918 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4919 status = 0;
4920 break;
4921 }
4922 usleep_range(10, 20);
4923 retry--;
4924 } while (retry);
4925 if (status) {
4926 i40e_debug(hw, I40E_DEBUG_PHY,
4927 "PHY: Can't write command to external PHY.\n");
4928 goto phy_write_end;
4929 }
4930
4931 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4932 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4933
4934 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4935 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4936 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4937 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4938 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4939 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4940 status = I40E_ERR_TIMEOUT;
4941 retry = 1000;
4942 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4943 do {
4944 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4945 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4946 status = 0;
4947 break;
4948 }
4949 usleep_range(10, 20);
4950 retry--;
4951 } while (retry);
4952
4953 phy_write_end:
4954 return status;
4955 }
4956
4957 /**
4958 * i40e_write_phy_register
4959 * @hw: pointer to the HW structure
4960 * @page: registers page number
4961 * @reg: register address in the page
4962 * @phy_addr: PHY address on MDIO interface
4963 * @value: PHY register value
4964 *
4965 * Writes value to specified PHY register
4966 **/
4967 i40e_status i40e_write_phy_register(struct i40e_hw *hw,
4968 u8 page, u16 reg, u8 phy_addr, u16 value)
4969 {
4970 i40e_status status;
4971
4972 switch (hw->device_id) {
4973 case I40E_DEV_ID_1G_BASE_T_X722:
4974 status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
4975 value);
4976 break;
4977 case I40E_DEV_ID_5G_BASE_T_BC:
4978 case I40E_DEV_ID_10G_BASE_T:
4979 case I40E_DEV_ID_10G_BASE_T4:
4980 case I40E_DEV_ID_10G_BASE_T_BC:
4981 case I40E_DEV_ID_10G_BASE_T_X722:
4982 case I40E_DEV_ID_25G_B:
4983 case I40E_DEV_ID_25G_SFP28:
4984 status = i40e_write_phy_register_clause45(hw, page, reg,
4985 phy_addr, value);
4986 break;
4987 default:
4988 status = I40E_ERR_UNKNOWN_PHY;
4989 break;
4990 }
4991
4992 return status;
4993 }
4994
4995 /**
4996 * i40e_read_phy_register
4997 * @hw: pointer to the HW structure
4998 * @page: registers page number
4999 * @reg: register address in the page
5000 * @phy_addr: PHY address on MDIO interface
5001 * @value: PHY register value
5002 *
5003 * Reads specified PHY register value
5004 **/
5005 i40e_status i40e_read_phy_register(struct i40e_hw *hw,
5006 u8 page, u16 reg, u8 phy_addr, u16 *value)
5007 {
5008 i40e_status status;
5009
5010 switch (hw->device_id) {
5011 case I40E_DEV_ID_1G_BASE_T_X722:
5012 status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
5013 value);
5014 break;
5015 case I40E_DEV_ID_5G_BASE_T_BC:
5016 case I40E_DEV_ID_10G_BASE_T:
5017 case I40E_DEV_ID_10G_BASE_T4:
5018 case I40E_DEV_ID_10G_BASE_T_BC:
5019 case I40E_DEV_ID_10G_BASE_T_X722:
5020 case I40E_DEV_ID_25G_B:
5021 case I40E_DEV_ID_25G_SFP28:
5022 status = i40e_read_phy_register_clause45(hw, page, reg,
5023 phy_addr, value);
5024 break;
5025 default:
5026 status = I40E_ERR_UNKNOWN_PHY;
5027 break;
5028 }
5029
5030 return status;
5031 }
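/* Usage sketch (illustrative): callers normally go through these wrappers so
 * the clause 22 vs. clause 45 choice stays keyed off hw->device_id, e.g.
 *
 *	u16 val;
 *
 *	status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
 *					I40E_PHY_LED_PROV_REG_1,
 *					i40e_get_phy_address(hw, port_num),
 *					&val);
 *
 * where port_num is the hypothetical caller's resolved MDIO port number.
 */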
5032
5033 /**
5034 * i40e_get_phy_address
5035 * @hw: pointer to the HW structure
5036 * @dev_num: PHY port number whose address we want
5037 *
5038 * Gets PHY address for current port
5039 **/
5040 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
5041 {
5042 u8 port_num = hw->func_caps.mdio_port_num;
5043 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
5044
5045 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
5046 }
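/* The MDIO/I2C select register packs one 5-bit PHY address per device
 * number, starting at bit 5, which is what the shift by ((dev_num + 1) * 5)
 * and the 0x1f mask above extract.
 */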
5047
5048 /**
5049 * i40e_blink_phy_link_led
5050 * @hw: pointer to the HW structure
5051 * @time: how long the LED should blink, in seconds
5052 * @interval: time between LED on and off states, in msecs
5053 *
5054 * Blinks PHY link LED
5055 **/
5056 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
5057 u32 time, u32 interval)
5058 {
5059 i40e_status status = 0;
5060 u32 i;
5061 u16 led_ctl;
5062 u16 gpio_led_port;
5063 u16 led_reg;
5064 u16 led_addr = I40E_PHY_LED_PROV_REG_1;
5065 u8 phy_addr = 0;
5066 u8 port_num;
5067
5068 i = rd32(hw, I40E_PFGEN_PORTNUM);
5069 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5070 phy_addr = i40e_get_phy_address(hw, port_num);
5071
5072 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5073 led_addr++) {
5074 status = i40e_read_phy_register_clause45(hw,
5075 I40E_PHY_COM_REG_PAGE,
5076 led_addr, phy_addr,
5077 &led_reg);
5078 if (status)
5079 goto phy_blinking_end;
5080 led_ctl = led_reg;
5081 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5082 led_reg = 0;
5083 status = i40e_write_phy_register_clause45(hw,
5084 I40E_PHY_COM_REG_PAGE,
5085 led_addr, phy_addr,
5086 led_reg);
5087 if (status)
5088 goto phy_blinking_end;
5089 break;
5090 }
5091 }
5092
5093 if (time > 0 && interval > 0) {
5094 for (i = 0; i < time * 1000; i += interval) {
5095 status = i40e_read_phy_register_clause45(hw,
5096 I40E_PHY_COM_REG_PAGE,
5097 led_addr, phy_addr, &led_reg);
5098 if (status)
5099 goto restore_config;
5100 if (led_reg & I40E_PHY_LED_MANUAL_ON)
5101 led_reg = 0;
5102 else
5103 led_reg = I40E_PHY_LED_MANUAL_ON;
5104 status = i40e_write_phy_register_clause45(hw,
5105 I40E_PHY_COM_REG_PAGE,
5106 led_addr, phy_addr, led_reg);
5107 if (status)
5108 goto restore_config;
5109 msleep(interval);
5110 }
5111 }
5112
5113 restore_config:
5114 status = i40e_write_phy_register_clause45(hw,
5115 I40E_PHY_COM_REG_PAGE,
5116 led_addr, phy_addr, led_ctl);
5117
5118 phy_blinking_end:
5119 return status;
5120 }
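/* Usage sketch (hypothetical values): blink the port LED for 5 seconds,
 * toggling every 500 ms; the helper itself restores the original LED
 * control value before returning:
 *
 *	status = i40e_blink_phy_link_led(hw, 5, 500);
 */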
5121
5122 /**
5123 * i40e_led_get_reg - read LED register
5124 * @hw: pointer to the HW structure
5125 * @led_addr: LED register address
5126 * @reg_val: read register value
5127 **/
5128 static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
5129 u32 *reg_val)
5130 {
5131 enum i40e_status_code status;
5132 u8 phy_addr = 0;
5133 u8 port_num;
5134 u32 i;
5135
5136 *reg_val = 0;
5137 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5138 status =
5139 i40e_aq_get_phy_register(hw,
5140 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5141 I40E_PHY_COM_REG_PAGE, true,
5142 I40E_PHY_LED_PROV_REG_1,
5143 reg_val, NULL);
5144 } else {
5145 i = rd32(hw, I40E_PFGEN_PORTNUM);
5146 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5147 phy_addr = i40e_get_phy_address(hw, port_num);
5148 status = i40e_read_phy_register_clause45(hw,
5149 I40E_PHY_COM_REG_PAGE,
5150 led_addr, phy_addr,
5151 (u16 *)reg_val);
5152 }
5153 return status;
5154 }
5155
5156 /**
5157 * i40e_led_set_reg - write LED register
5158 * @hw: pointer to the HW structure
5159 * @led_addr: LED register address
5160 * @reg_val: register value to write
5161 **/
5162 static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
5163 u32 reg_val)
5164 {
5165 enum i40e_status_code status;
5166 u8 phy_addr = 0;
5167 u8 port_num;
5168 u32 i;
5169
5170 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5171 status =
5172 i40e_aq_set_phy_register(hw,
5173 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5174 I40E_PHY_COM_REG_PAGE, true,
5175 I40E_PHY_LED_PROV_REG_1,
5176 reg_val, NULL);
5177 } else {
5178 i = rd32(hw, I40E_PFGEN_PORTNUM);
5179 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5180 phy_addr = i40e_get_phy_address(hw, port_num);
5181 status = i40e_write_phy_register_clause45(hw,
5182 I40E_PHY_COM_REG_PAGE,
5183 led_addr, phy_addr,
5184 (u16)reg_val);
5185 }
5186
5187 return status;
5188 }
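/* Both LED register helpers above prefer the PHY-access AdminQ commands when
 * the firmware advertises I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE, and only fall
 * back to direct clause 45 MDIO reads/writes on older firmware.
 */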
5189
5190 /**
5191 * i40e_led_get_phy - return current on/off mode
5192 * @hw: pointer to the hw struct
5193 * @led_addr: address of led register to use
5194 * @val: original value of register to use
5195 *
5196 **/
5197 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5198 u16 *val)
5199 {
5200 i40e_status status = 0;
5201 u16 gpio_led_port;
5202 u8 phy_addr = 0;
5203 u16 reg_val;
5204 u16 temp_addr;
5205 u8 port_num;
5206 u32 i;
5207 u32 reg_val_aq;
5208
5209 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5210 status =
5211 i40e_aq_get_phy_register(hw,
5212 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5213 I40E_PHY_COM_REG_PAGE, true,
5214 I40E_PHY_LED_PROV_REG_1,
5215 &reg_val_aq, NULL);
5216 if (status == I40E_SUCCESS)
5217 *val = (u16)reg_val_aq;
5218 return status;
5219 }
5220 temp_addr = I40E_PHY_LED_PROV_REG_1;
5221 i = rd32(hw, I40E_PFGEN_PORTNUM);
5222 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5223 phy_addr = i40e_get_phy_address(hw, port_num);
5224
5225 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5226 temp_addr++) {
5227 status = i40e_read_phy_register_clause45(hw,
5228 I40E_PHY_COM_REG_PAGE,
5229 temp_addr, phy_addr,
5230 &reg_val);
5231 if (status)
5232 return status;
5233 *val = reg_val;
5234 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5235 *led_addr = temp_addr;
5236 break;
5237 }
5238 }
5239 return status;
5240 }
5241
5242 /**
5243 * i40e_led_set_phy
5244 * @hw: pointer to the HW structure
5245 * @on: true or false
5246 * @led_addr: address of led register to use
5247 * @mode: original val plus bit for set or ignore
5248 *
5249 * Set LEDs on or off when controlled by the PHY
5250 *
5251 **/
5252 i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
5253 u16 led_addr, u32 mode)
5254 {
5255 i40e_status status = 0;
5256 u32 led_ctl = 0;
5257 u32 led_reg = 0;
5258
5259 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5260 if (status)
5261 return status;
5262 led_ctl = led_reg;
5263 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5264 led_reg = 0;
5265 status = i40e_led_set_reg(hw, led_addr, led_reg);
5266 if (status)
5267 return status;
5268 }
5269 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5270 if (status)
5271 goto restore_config;
5272 if (on)
5273 led_reg = I40E_PHY_LED_MANUAL_ON;
5274 else
5275 led_reg = 0;
5276
5277 status = i40e_led_set_reg(hw, led_addr, led_reg);
5278 if (status)
5279 goto restore_config;
5280 if (mode & I40E_PHY_LED_MODE_ORIG) {
5281 led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5282 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5283 }
5284 return status;
5285
5286 restore_config:
5287 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5288 return status;
5289 }
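/* Sketch of the identify-LED pattern a caller might use (hedged, not lifted
 * verbatim from the ethtool path): read and save the provisioning value
 * once, force the LED on, and later restore the saved value via the ORIG
 * flag:
 *
 *	u16 led_addr, orig;
 *
 *	i40e_led_get_phy(hw, &led_addr, &orig);
 *	i40e_led_set_phy(hw, true, led_addr, 0);
 *	...
 *	i40e_led_set_phy(hw, false, led_addr,
 *			 orig | I40E_PHY_LED_MODE_ORIG);
 */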
5290
5291 /**
5292 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5293 * @hw: pointer to the hw struct
5294 * @reg_addr: register address
5295 * @reg_val: ptr to register value
5296 * @cmd_details: pointer to command details structure or NULL
5297 *
5298 * Use the firmware to read the Rx control register,
5299 * especially useful if the Rx unit is under heavy pressure
5300 **/
5301 i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5302 u32 reg_addr, u32 *reg_val,
5303 struct i40e_asq_cmd_details *cmd_details)
5304 {
5305 struct i40e_aq_desc desc;
5306 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5307 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5308 i40e_status status;
5309
5310 if (!reg_val)
5311 return I40E_ERR_PARAM;
5312
5313 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5314
5315 cmd_resp->address = cpu_to_le32(reg_addr);
5316
5317 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5318
5319 if (status == 0)
5320 *reg_val = le32_to_cpu(cmd_resp->value);
5321
5322 return status;
5323 }
5324
5325 /**
5326 * i40e_read_rx_ctl - read from an Rx control register
5327 * @hw: pointer to the hw struct
5328 * @reg_addr: register address
5329 **/
5330 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5331 {
5332 i40e_status status = 0;
5333 bool use_register;
5334 int retry = 5;
5335 u32 val = 0;
5336
5337 use_register = (((hw->aq.api_maj_ver == 1) &&
5338 (hw->aq.api_min_ver < 5)) ||
5339 (hw->mac.type == I40E_MAC_X722));
5340 if (!use_register) {
5341 do_retry:
5342 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5343 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5344 usleep_range(1000, 2000);
5345 retry--;
5346 goto do_retry;
5347 }
5348 }
5349
5350 /* if the AQ access failed, try the old-fashioned way */
5351 if (status || use_register)
5352 val = rd32(hw, reg_addr);
5353
5354 return val;
5355 }
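/* Usage sketch: Rx control registers are read through this wrapper so the
 * firmware-mediated path is used whenever the AQ API supports it, with a
 * direct register read as the fallback. The register name below is only an
 * example of an Rx control register this hardware exposes:
 *
 *	u32 hena = i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
 */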
5356
5357 /**
5358 * i40e_aq_rx_ctl_write_register
5359 * @hw: pointer to the hw struct
5360 * @reg_addr: register address
5361 * @reg_val: register value
5362 * @cmd_details: pointer to command details structure or NULL
5363 *
5364 * Use the firmware to write to an Rx control register,
5365 * especially useful if the Rx unit is under heavy pressure
5366 **/
5367 i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5368 u32 reg_addr, u32 reg_val,
5369 struct i40e_asq_cmd_details *cmd_details)
5370 {
5371 struct i40e_aq_desc desc;
5372 struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5373 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5374 i40e_status status;
5375
5376 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5377
5378 cmd->address = cpu_to_le32(reg_addr);
5379 cmd->value = cpu_to_le32(reg_val);
5380
5381 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5382
5383 return status;
5384 }
5385
5386 /**
5387 * i40e_write_rx_ctl - write to an Rx control register
5388 * @hw: pointer to the hw struct
5389 * @reg_addr: register address
5390 * @reg_val: register value
5391 **/
5392 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5393 {
5394 i40e_status status = 0;
5395 bool use_register;
5396 int retry = 5;
5397
5398 use_register = (((hw->aq.api_maj_ver == 1) &&
5399 (hw->aq.api_min_ver < 5)) ||
5400 (hw->mac.type == I40E_MAC_X722));
5401 if (!use_register) {
5402 do_retry:
5403 status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5404 reg_val, NULL);
5405 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5406 usleep_range(1000, 2000);
5407 retry--;
5408 goto do_retry;
5409 }
5410 }
5411
5412 /* if the AQ access failed, try the old-fashioned way */
5413 if (status || use_register)
5414 wr32(hw, reg_addr, reg_val);
5415 }
5416
5417 /**
5418 * i40e_mdio_if_number_selection - MDIO I/F number selection
5419 * @hw: pointer to the hw struct
5420 * @set_mdio: use MDIO I/F number specified by mdio_num
5421 * @mdio_num: MDIO I/F number
5422 * @cmd: pointer to PHY Register command structure
5423 **/
5424 static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
5425 u8 mdio_num,
5426 struct i40e_aqc_phy_register_access *cmd)
5427 {
5428 if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
5429 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
5430 cmd->cmd_flags |=
5431 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
5432 ((mdio_num <<
5433 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
5434 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
5435 else
5436 i40e_debug(hw, I40E_DEBUG_PHY,
5437 "MDIO I/F number selection not supported by current FW version.\n");
5438 }
5439 }
5440
5441 /**
5442 * i40e_aq_set_phy_register_ext
5443 * @hw: pointer to the hw struct
5444 * @phy_select: select which phy should be accessed
5445 * @dev_addr: PHY device address
5446 * @page_change: flag to indicate if phy page should be updated
5447 * @set_mdio: use MDIO I/F number specified by mdio_num
5448 * @mdio_num: MDIO I/F number
5449 * @reg_addr: PHY register address
5450 * @reg_val: new register value
5451 * @cmd_details: pointer to command details structure or NULL
5452 *
5453 * Write the external PHY register.
5454 * NOTE: In common cases the MDIO I/F number should not be changed, which is
5455 * why the simple wrapper i40e_aq_set_phy_register may be used instead.
5456 **/
5457 enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
5458 u8 phy_select, u8 dev_addr, bool page_change,
5459 bool set_mdio, u8 mdio_num,
5460 u32 reg_addr, u32 reg_val,
5461 struct i40e_asq_cmd_details *cmd_details)
5462 {
5463 struct i40e_aq_desc desc;
5464 struct i40e_aqc_phy_register_access *cmd =
5465 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5466 i40e_status status;
5467
5468 i40e_fill_default_direct_cmd_desc(&desc,
5469 i40e_aqc_opc_set_phy_register);
5470
5471 cmd->phy_interface = phy_select;
5472 cmd->dev_address = dev_addr;
5473 cmd->reg_address = cpu_to_le32(reg_addr);
5474 cmd->reg_value = cpu_to_le32(reg_val);
5475
5476 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5477
5478 if (!page_change)
5479 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5480
5481 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5482
5483 return status;
5484 }
5485
5486 /**
5487 * i40e_aq_get_phy_register_ext
5488 * @hw: pointer to the hw struct
5489 * @phy_select: select which phy should be accessed
5490 * @dev_addr: PHY device address
5491 * @page_change: flag to indicate if phy page should be updated
5492 * @set_mdio: use MDIO I/F number specified by mdio_num
5493 * @mdio_num: MDIO I/F number
5494 * @reg_addr: PHY register address
5495 * @reg_val: read register value
5496 * @cmd_details: pointer to command details structure or NULL
5497 *
5498 * Read the external PHY register.
5499 * NOTE: In common cases the MDIO I/F number should not be changed, which is
5500 * why the simple wrapper i40e_aq_get_phy_register may be used instead.
5501 **/
5502 enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
5503 u8 phy_select, u8 dev_addr, bool page_change,
5504 bool set_mdio, u8 mdio_num,
5505 u32 reg_addr, u32 *reg_val,
5506 struct i40e_asq_cmd_details *cmd_details)
5507 {
5508 struct i40e_aq_desc desc;
5509 struct i40e_aqc_phy_register_access *cmd =
5510 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5511 i40e_status status;
5512
5513 i40e_fill_default_direct_cmd_desc(&desc,
5514 i40e_aqc_opc_get_phy_register);
5515
5516 cmd->phy_interface = phy_select;
5517 cmd->dev_address = dev_addr;
5518 cmd->reg_address = cpu_to_le32(reg_addr);
5519
5520 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5521
5522 if (!page_change)
5523 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5524
5525 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5526 if (!status)
5527 *reg_val = le32_to_cpu(cmd->reg_value);
5528
5529 return status;
5530 }
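/* As the NOTEs above say, most callers never override the MDIO I/F number;
 * the i40e_aq_get_phy_register()/i40e_aq_set_phy_register() wrappers used by
 * the LED helpers earlier in this file are assumed to simply forward to the
 * _ext variants with set_mdio = false and mdio_num = 0.
 */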
5531
5532 /**
5533 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5534 * @hw: pointer to the hw struct
5535 * @buff: command buffer (size in bytes = buff_size)
5536 * @buff_size: buffer size in bytes
5537 * @track_id: package tracking id
5538 * @error_offset: returns error offset
5539 * @error_info: returns error information
5540 * @cmd_details: pointer to command details structure or NULL
5541 **/
5542 enum
5543 i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5544 u16 buff_size, u32 track_id,
5545 u32 *error_offset, u32 *error_info,
5546 struct i40e_asq_cmd_details *cmd_details)
5547 {
5548 struct i40e_aq_desc desc;
5549 struct i40e_aqc_write_personalization_profile *cmd =
5550 (struct i40e_aqc_write_personalization_profile *)
5551 &desc.params.raw;
5552 struct i40e_aqc_write_ddp_resp *resp;
5553 i40e_status status;
5554
5555 i40e_fill_default_direct_cmd_desc(&desc,
5556 i40e_aqc_opc_write_personalization_profile);
5557
5558 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5559 if (buff_size > I40E_AQ_LARGE_BUF)
5560 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5561
5562 desc.datalen = cpu_to_le16(buff_size);
5563
5564 cmd->profile_track_id = cpu_to_le32(track_id);
5565
5566 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5567 if (!status) {
5568 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5569 if (error_offset)
5570 *error_offset = le32_to_cpu(resp->error_offset);
5571 if (error_info)
5572 *error_info = le32_to_cpu(resp->error_info);
5573 }
5574
5575 return status;
5576 }
5577
5578 /**
5579 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5580 * @hw: pointer to the hw struct
5581 * @buff: command buffer (size in bytes = buff_size)
5582 * @buff_size: buffer size in bytes
5583 * @flags: AdminQ command flags
5584 * @cmd_details: pointer to command details structure or NULL
5585 **/
5586 enum
5587 i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5588 u16 buff_size, u8 flags,
5589 struct i40e_asq_cmd_details *cmd_details)
5590 {
5591 struct i40e_aq_desc desc;
5592 struct i40e_aqc_get_applied_profiles *cmd =
5593 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5594 i40e_status status;
5595
5596 i40e_fill_default_direct_cmd_desc(&desc,
5597 i40e_aqc_opc_get_personalization_profile_list);
5598
5599 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5600 if (buff_size > I40E_AQ_LARGE_BUF)
5601 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5602 desc.datalen = cpu_to_le16(buff_size);
5603
5604 cmd->flags = flags;
5605
5606 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5607
5608 return status;
5609 }
5610
5611 /**
5612 * i40e_find_segment_in_package
5613 * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
5614 * @pkg_hdr: pointer to the package header to be searched
5615 *
5616 * This function searches a package file for a particular segment type. On
5617 * success it returns a pointer to the segment header, otherwise it will
5618 * return NULL.
5619 **/
5620 struct i40e_generic_seg_header *
5621 i40e_find_segment_in_package(u32 segment_type,
5622 struct i40e_package_header *pkg_hdr)
5623 {
5624 struct i40e_generic_seg_header *segment;
5625 u32 i;
5626
5627 /* Search all package segments for the requested segment type */
5628 for (i = 0; i < pkg_hdr->segment_count; i++) {
5629 segment =
5630 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5631 pkg_hdr->segment_offset[i]);
5632
5633 if (segment->type == segment_type)
5634 return segment;
5635 }
5636
5637 return NULL;
5638 }
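/* Usage sketch (cast and variable names are illustrative): a DDP package is
 * typically walked by first locating its i40e profile segment:
 *
 *	struct i40e_profile_segment *profile =
 *		(struct i40e_profile_segment *)
 *		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 */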
5639
5640 /* Get section table in profile */
5641 #define I40E_SECTION_TABLE(profile, sec_tbl) \
5642 do { \
5643 struct i40e_profile_segment *p = (profile); \
5644 u32 count; \
5645 u32 *nvm; \
5646 count = p->device_table_count; \
5647 nvm = (u32 *)&p->device_table[count]; \
5648 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5649 } while (0)
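/* Layout assumed by I40E_SECTION_TABLE above: the profile segment header is
 * followed by device_table_count device entries, then an NVM table whose
 * first u32 holds its own length; the section table starts immediately after
 * that NVM table, which is what the &nvm[nvm[0] + 1] arithmetic steps over.
 */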
5650
5651 /* Get section header in profile */
5652 #define I40E_SECTION_HEADER(profile, offset) \
5653 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5654
5655 /**
5656 * i40e_find_section_in_profile
5657 * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
5658 * @profile: pointer to the i40e segment header to be searched
5659 *
5660 * This function searches i40e segment for a particular section type. On
5661 * success it returns a pointer to the section header, otherwise it will
5662 * return NULL.
5663 **/
5664 struct i40e_profile_section_header *
5665 i40e_find_section_in_profile(u32 section_type,
5666 struct i40e_profile_segment *profile)
5667 {
5668 struct i40e_profile_section_header *sec;
5669 struct i40e_section_table *sec_tbl;
5670 u32 sec_off;
5671 u32 i;
5672
5673 if (profile->header.type != SEGMENT_TYPE_I40E)
5674 return NULL;
5675
5676 I40E_SECTION_TABLE(profile, sec_tbl);
5677
5678 for (i = 0; i < sec_tbl->section_count; i++) {
5679 sec_off = sec_tbl->section_offset[i];
5680 sec = I40E_SECTION_HEADER(profile, sec_off);
5681 if (sec->section.type == section_type)
5682 return sec;
5683 }
5684
5685 return NULL;
5686 }
5687
5688 /**
5689 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5690 * @hw: pointer to the hw struct
5691 * @aq: command buffer containing all data to execute AQ
5692 **/
5693 static enum
5694 i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5695 struct i40e_profile_aq_section *aq)
5696 {
5697 i40e_status status;
5698 struct i40e_aq_desc desc;
5699 u8 *msg = NULL;
5700 u16 msglen;
5701
5702 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5703 desc.flags |= cpu_to_le16(aq->flags);
5704 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5705
5706 msglen = aq->datalen;
5707 if (msglen) {
5708 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5709 I40E_AQ_FLAG_RD));
5710 if (msglen > I40E_AQ_LARGE_BUF)
5711 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5712 desc.datalen = cpu_to_le16(msglen);
5713 msg = &aq->data[0];
5714 }
5715
5716 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5717
5718 if (status) {
5719 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5720 "unable to exec DDP AQ opcode %u, error %d\n",
5721 aq->opcode, status);
5722 return status;
5723 }
5724
5725 /* copy returned desc to aq_buf */
5726 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5727
5728 return 0;
5729 }
5730
5731 /**
5732 * i40e_validate_profile
5733 * @hw: pointer to the hardware structure
5734 * @profile: pointer to the profile segment of the package to be validated
5735 * @track_id: package tracking id
5736 * @rollback: flag if the profile is for rollback.
5737 *
5738 * Validates supported devices and profile's sections.
5739 */
5740 static enum i40e_status_code
5741 i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5742 u32 track_id, bool rollback)
5743 {
5744 struct i40e_profile_section_header *sec = NULL;
5745 i40e_status status = 0;
5746 struct i40e_section_table *sec_tbl;
5747 u32 vendor_dev_id;
5748 u32 dev_cnt;
5749 u32 sec_off;
5750 u32 i;
5751
5752 if (track_id == I40E_DDP_TRACKID_INVALID) {
5753 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5754 return I40E_NOT_SUPPORTED;
5755 }
5756
5757 dev_cnt = profile->device_table_count;
5758 for (i = 0; i < dev_cnt; i++) {
5759 vendor_dev_id = profile->device_table[i].vendor_dev_id;
5760 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5761 hw->device_id == (vendor_dev_id & 0xFFFF))
5762 break;
5763 }
5764 if (dev_cnt && i == dev_cnt) {
5765 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5766 "Device doesn't support DDP\n");
5767 return I40E_ERR_DEVICE_NOT_SUPPORTED;
5768 }
5769
5770 I40E_SECTION_TABLE(profile, sec_tbl);
5771
5772 /* Validate sections types */
5773 for (i = 0; i < sec_tbl->section_count; i++) {
5774 sec_off = sec_tbl->section_offset[i];
5775 sec = I40E_SECTION_HEADER(profile, sec_off);
5776 if (rollback) {
5777 if (sec->section.type == SECTION_TYPE_MMIO ||
5778 sec->section.type == SECTION_TYPE_AQ ||
5779 sec->section.type == SECTION_TYPE_RB_AQ) {
5780 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5781 "Not a roll-back package\n");
5782 return I40E_NOT_SUPPORTED;
5783 }
5784 } else {
5785 if (sec->section.type == SECTION_TYPE_RB_AQ ||
5786 sec->section.type == SECTION_TYPE_RB_MMIO) {
5787 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5788 "Not an original package\n");
5789 return I40E_NOT_SUPPORTED;
5790 }
5791 }
5792 }
5793
5794 return status;
5795 }
5796
5797 /**
5798 * i40e_write_profile
5799 * @hw: pointer to the hardware structure
5800 * @profile: pointer to the profile segment of the package to be downloaded
5801 * @track_id: package tracking id
5802 *
5803 * Handles the download of a complete package.
5804 */
5805 enum i40e_status_code
5806 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5807 u32 track_id)
5808 {
5809 i40e_status status = 0;
5810 struct i40e_section_table *sec_tbl;
5811 struct i40e_profile_section_header *sec = NULL;
5812 struct i40e_profile_aq_section *ddp_aq;
5813 u32 section_size = 0;
5814 u32 offset = 0, info = 0;
5815 u32 sec_off;
5816 u32 i;
5817
5818 status = i40e_validate_profile(hw, profile, track_id, false);
5819 if (status)
5820 return status;
5821
5822 I40E_SECTION_TABLE(profile, sec_tbl);
5823
5824 for (i = 0; i < sec_tbl->section_count; i++) {
5825 sec_off = sec_tbl->section_offset[i];
5826 sec = I40E_SECTION_HEADER(profile, sec_off);
5827 /* Process generic admin command */
5828 if (sec->section.type == SECTION_TYPE_AQ) {
5829 ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5830 status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5831 if (status) {
5832 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5833 "Failed to execute aq: section %d, opcode %u\n",
5834 i, ddp_aq->opcode);
5835 break;
5836 }
5837 sec->section.type = SECTION_TYPE_RB_AQ;
5838 }
5839
5840 /* Skip any non-mmio sections */
5841 if (sec->section.type != SECTION_TYPE_MMIO)
5842 continue;
5843
5844 section_size = sec->section.size +
5845 sizeof(struct i40e_profile_section_header);
5846
5847 /* Write MMIO section */
5848 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5849 track_id, &offset, &info, NULL);
5850 if (status) {
5851 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5852 "Failed to write profile: section %d, offset %d, info %d\n",
5853 i, offset, info);
5854 break;
5855 }
5856 }
5857 return status;
5858 }
5859
5860 /**
5861 * i40e_rollback_profile
5862 * @hw: pointer to the hardware structure
5863 * @profile: pointer to the profile segment of the package to be removed
5864 * @track_id: package tracking id
5865 *
5866 * Rolls back previously loaded package.
5867 */
5868 enum i40e_status_code
5869 i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5870 u32 track_id)
5871 {
5872 struct i40e_profile_section_header *sec = NULL;
5873 i40e_status status = 0;
5874 struct i40e_section_table *sec_tbl;
5875 u32 offset = 0, info = 0;
5876 u32 section_size = 0;
5877 u32 sec_off;
5878 int i;
5879
5880 status = i40e_validate_profile(hw, profile, track_id, true);
5881 if (status)
5882 return status;
5883
5884 I40E_SECTION_TABLE(profile, sec_tbl);
5885
5886 /* For rollback write sections in reverse */
5887 for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5888 sec_off = sec_tbl->section_offset[i];
5889 sec = I40E_SECTION_HEADER(profile, sec_off);
5890
5891 /* Skip any non-rollback sections */
5892 if (sec->section.type != SECTION_TYPE_RB_MMIO)
5893 continue;
5894
5895 section_size = sec->section.size +
5896 sizeof(struct i40e_profile_section_header);
5897
5898 /* Write roll-back MMIO section */
5899 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5900 track_id, &offset, &info, NULL);
5901 if (status) {
5902 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5903 "Failed to write profile: section %d, offset %d, info %d\n",
5904 i, offset, info);
5905 break;
5906 }
5907 }
5908 return status;
5909 }
5910
5911 /**
5912 * i40e_add_pinfo_to_list
5913 * @hw: pointer to the hardware structure
5914 * @profile: pointer to the profile segment of the package
5915 * @profile_info_sec: buffer for information section
5916 * @track_id: package tracking id
5917 *
5918 * Register a profile to the list of loaded profiles.
5919 */
5920 enum i40e_status_code
5921 i40e_add_pinfo_to_list(struct i40e_hw *hw,
5922 struct i40e_profile_segment *profile,
5923 u8 *profile_info_sec, u32 track_id)
5924 {
5925 i40e_status status = 0;
5926 struct i40e_profile_section_header *sec = NULL;
5927 struct i40e_profile_info *pinfo;
5928 u32 offset = 0, info = 0;
5929
5930 sec = (struct i40e_profile_section_header *)profile_info_sec;
5931 sec->tbl_size = 1;
5932 sec->data_end = sizeof(struct i40e_profile_section_header) +
5933 sizeof(struct i40e_profile_info);
5934 sec->section.type = SECTION_TYPE_INFO;
5935 sec->section.offset = sizeof(struct i40e_profile_section_header);
5936 sec->section.size = sizeof(struct i40e_profile_info);
5937 pinfo = (struct i40e_profile_info *)(profile_info_sec +
5938 sec->section.offset);
5939 pinfo->track_id = track_id;
5940 pinfo->version = profile->version;
5941 pinfo->op = I40E_DDP_ADD_TRACKID;
5942 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5943
5944 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5945 track_id, &offset, &info, NULL);
5946
5947 return status;
5948 }
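/* End-to-end DDP load sketch (hedged; buffer management, error handling and
 * the sizing of the pinfo_sec buffer are the caller's responsibility):
 *
 *	profile = (struct i40e_profile_segment *)
 *		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	status = i40e_write_profile(hw, profile, track_id);
 *	if (!status)
 *		status = i40e_add_pinfo_to_list(hw, profile, pinfo_sec,
 *						track_id);
 */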
5949
5950 /**
5951 * i40e_aq_add_cloud_filters
5952 * @hw: pointer to the hardware structure
5953 * @seid: VSI seid to add cloud filters from
5954 * @filters: Buffer which contains the filters to be added
5955 * @filter_count: number of filters contained in the buffer
5956 *
5957 * Set the cloud filters for a given VSI. The contents of the
5958 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5959 * of the function.
5960 *
5961 **/
5962 enum i40e_status_code
5963 i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5964 struct i40e_aqc_cloud_filters_element_data *filters,
5965 u8 filter_count)
5966 {
5967 struct i40e_aq_desc desc;
5968 struct i40e_aqc_add_remove_cloud_filters *cmd =
5969 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5970 enum i40e_status_code status;
5971 u16 buff_len;
5972
5973 i40e_fill_default_direct_cmd_desc(&desc,
5974 i40e_aqc_opc_add_cloud_filters);
5975
5976 buff_len = filter_count * sizeof(*filters);
5977 desc.datalen = cpu_to_le16(buff_len);
5978 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5979 cmd->num_filters = filter_count;
5980 cmd->seid = cpu_to_le16(seid);
5981
5982 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5983
5984 return status;
5985 }
5986
5987 /**
5988 * i40e_aq_add_cloud_filters_bb
5989 * @hw: pointer to the hardware structure
5990 * @seid: VSI seid to add cloud filters from
5991 * @filters: Buffer which contains the filters in big buffer to be added
5992 * @filter_count: number of filters contained in the buffer
5993 *
5994 * Set the big buffer cloud filters for a given VSI. The contents of the
5995 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5996 * function.
5997 *
5998 **/
5999 enum i40e_status_code
6000 i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
6001 struct i40e_aqc_cloud_filters_element_bb *filters,
6002 u8 filter_count)
6003 {
6004 struct i40e_aq_desc desc;
6005 struct i40e_aqc_add_remove_cloud_filters *cmd =
6006 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6007 i40e_status status;
6008 u16 buff_len;
6009 int i;
6010
6011 i40e_fill_default_direct_cmd_desc(&desc,
6012 i40e_aqc_opc_add_cloud_filters);
6013
6014 buff_len = filter_count * sizeof(*filters);
6015 desc.datalen = cpu_to_le16(buff_len);
6016 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6017 cmd->num_filters = filter_count;
6018 cmd->seid = cpu_to_le16(seid);
6019 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
6020
6021 for (i = 0; i < filter_count; i++) {
6022 u16 tnl_type;
6023 u32 ti;
6024
6025 tnl_type = (le16_to_cpu(filters[i].element.flags) &
6026 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6027 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6028
6029 /* Due to hardware eccentricities, the VNI for Geneve is shifted
6030 * one more byte further than normally used for Tenant ID in
6031 * other tunnel types.
6032 */
6033 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6034 ti = le32_to_cpu(filters[i].element.tenant_id);
6035 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
6036 }
6037 }
6038
6039 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6040
6041 return status;
6042 }
6043
6044 /**
6045 * i40e_aq_rem_cloud_filters
6046 * @hw: pointer to the hardware structure
6047 * @seid: VSI seid to remove cloud filters from
6048 * @filters: Buffer which contains the filters to be removed
6049 * @filter_count: number of filters contained in the buffer
6050 *
6051 * Remove the cloud filters for a given VSI. The contents of the
6052 * i40e_aqc_cloud_filters_element_data are filled in by the caller
6053 * of the function.
6054 *
6055 **/
6056 enum i40e_status_code
6057 i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
6058 struct i40e_aqc_cloud_filters_element_data *filters,
6059 u8 filter_count)
6060 {
6061 struct i40e_aq_desc desc;
6062 struct i40e_aqc_add_remove_cloud_filters *cmd =
6063 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6064 enum i40e_status_code status;
6065 u16 buff_len;
6066
6067 i40e_fill_default_direct_cmd_desc(&desc,
6068 i40e_aqc_opc_remove_cloud_filters);
6069
6070 buff_len = filter_count * sizeof(*filters);
6071 desc.datalen = cpu_to_le16(buff_len);
6072 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6073 cmd->num_filters = filter_count;
6074 cmd->seid = cpu_to_le16(seid);
6075
6076 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6077
6078 return status;
6079 }
6080
6081 /**
6082 * i40e_aq_rem_cloud_filters_bb
6083 * @hw: pointer to the hardware structure
6084 * @seid: VSI seid to remove cloud filters from
6085 * @filters: Buffer which contains the filters in big buffer to be removed
6086 * @filter_count: number of filters contained in the buffer
6087 *
6088 * Remove the big buffer cloud filters for a given VSI. The contents of the
6089 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
6090 * function.
6091 *
6092 **/
6093 enum i40e_status_code
6094 i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
6095 struct i40e_aqc_cloud_filters_element_bb *filters,
6096 u8 filter_count)
6097 {
6098 struct i40e_aq_desc desc;
6099 struct i40e_aqc_add_remove_cloud_filters *cmd =
6100 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6101 i40e_status status;
6102 u16 buff_len;
6103 int i;
6104
6105 i40e_fill_default_direct_cmd_desc(&desc,
6106 i40e_aqc_opc_remove_cloud_filters);
6107
6108 buff_len = filter_count * sizeof(*filters);
6109 desc.datalen = cpu_to_le16(buff_len);
6110 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6111 cmd->num_filters = filter_count;
6112 cmd->seid = cpu_to_le16(seid);
6113 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
6114
6115 for (i = 0; i < filter_count; i++) {
6116 u16 tnl_type;
6117 u32 ti;
6118
6119 tnl_type = (le16_to_cpu(filters[i].element.flags) &
6120 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6121 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6122
6123 /* Due to hardware eccentricities, the VNI for Geneve is shifted
6124 * one more byte further than normally used for Tenant ID in
6125 * other tunnel types.
6126 */
6127 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6128 ti = le32_to_cpu(filters[i].element.tenant_id);
6129 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
6130 }
6131 }
6132
6133 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6134
6135 return status;
6136 }
6137