1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2021 Intel Corporation. */
3
4 #include "i40e.h"
5 #include "i40e_type.h"
6 #include "i40e_adminq.h"
7 #include "i40e_prototype.h"
8 #include <linux/avf/virtchnl.h>
9
10 /**
11 * i40e_set_mac_type - Sets MAC type
12 * @hw: pointer to the HW structure
13 *
14 * This function sets the mac type of the adapter based on the
15 * vendor ID and device ID stored in the hw structure.
16 **/
17 int i40e_set_mac_type(struct i40e_hw *hw)
18 {
19 int status = 0;
20
21 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
22 switch (hw->device_id) {
23 case I40E_DEV_ID_SFP_XL710:
24 case I40E_DEV_ID_QEMU:
25 case I40E_DEV_ID_KX_B:
26 case I40E_DEV_ID_KX_C:
27 case I40E_DEV_ID_QSFP_A:
28 case I40E_DEV_ID_QSFP_B:
29 case I40E_DEV_ID_QSFP_C:
30 case I40E_DEV_ID_1G_BASE_T_BC:
31 case I40E_DEV_ID_5G_BASE_T_BC:
32 case I40E_DEV_ID_10G_BASE_T:
33 case I40E_DEV_ID_10G_BASE_T4:
34 case I40E_DEV_ID_10G_BASE_T_BC:
35 case I40E_DEV_ID_10G_B:
36 case I40E_DEV_ID_10G_SFP:
37 case I40E_DEV_ID_20G_KR2:
38 case I40E_DEV_ID_20G_KR2_A:
39 case I40E_DEV_ID_25G_B:
40 case I40E_DEV_ID_25G_SFP28:
41 case I40E_DEV_ID_X710_N3000:
42 case I40E_DEV_ID_XXV710_N3000:
43 hw->mac.type = I40E_MAC_XL710;
44 break;
45 case I40E_DEV_ID_KX_X722:
46 case I40E_DEV_ID_QSFP_X722:
47 case I40E_DEV_ID_SFP_X722:
48 case I40E_DEV_ID_1G_BASE_T_X722:
49 case I40E_DEV_ID_10G_BASE_T_X722:
50 case I40E_DEV_ID_SFP_I_X722:
51 case I40E_DEV_ID_SFP_X722_A:
52 hw->mac.type = I40E_MAC_X722;
53 break;
54 default:
55 hw->mac.type = I40E_MAC_GENERIC;
56 break;
57 }
58 } else {
59 status = -ENODEV;
60 }
61
62 hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
63 hw->mac.type, status);
64 return status;
65 }
66
67 /**
68 * i40e_aq_str - convert AQ err code to a string
69 * @hw: pointer to the HW structure
70 * @aq_err: the AQ error code to convert
71 **/
72 const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
73 {
74 switch (aq_err) {
75 case I40E_AQ_RC_OK:
76 return "OK";
77 case I40E_AQ_RC_EPERM:
78 return "I40E_AQ_RC_EPERM";
79 case I40E_AQ_RC_ENOENT:
80 return "I40E_AQ_RC_ENOENT";
81 case I40E_AQ_RC_ESRCH:
82 return "I40E_AQ_RC_ESRCH";
83 case I40E_AQ_RC_EINTR:
84 return "I40E_AQ_RC_EINTR";
85 case I40E_AQ_RC_EIO:
86 return "I40E_AQ_RC_EIO";
87 case I40E_AQ_RC_ENXIO:
88 return "I40E_AQ_RC_ENXIO";
89 case I40E_AQ_RC_E2BIG:
90 return "I40E_AQ_RC_E2BIG";
91 case I40E_AQ_RC_EAGAIN:
92 return "I40E_AQ_RC_EAGAIN";
93 case I40E_AQ_RC_ENOMEM:
94 return "I40E_AQ_RC_ENOMEM";
95 case I40E_AQ_RC_EACCES:
96 return "I40E_AQ_RC_EACCES";
97 case I40E_AQ_RC_EFAULT:
98 return "I40E_AQ_RC_EFAULT";
99 case I40E_AQ_RC_EBUSY:
100 return "I40E_AQ_RC_EBUSY";
101 case I40E_AQ_RC_EEXIST:
102 return "I40E_AQ_RC_EEXIST";
103 case I40E_AQ_RC_EINVAL:
104 return "I40E_AQ_RC_EINVAL";
105 case I40E_AQ_RC_ENOTTY:
106 return "I40E_AQ_RC_ENOTTY";
107 case I40E_AQ_RC_ENOSPC:
108 return "I40E_AQ_RC_ENOSPC";
109 case I40E_AQ_RC_ENOSYS:
110 return "I40E_AQ_RC_ENOSYS";
111 case I40E_AQ_RC_ERANGE:
112 return "I40E_AQ_RC_ERANGE";
113 case I40E_AQ_RC_EFLUSHED:
114 return "I40E_AQ_RC_EFLUSHED";
115 case I40E_AQ_RC_BAD_ADDR:
116 return "I40E_AQ_RC_BAD_ADDR";
117 case I40E_AQ_RC_EMODE:
118 return "I40E_AQ_RC_EMODE";
119 case I40E_AQ_RC_EFBIG:
120 return "I40E_AQ_RC_EFBIG";
121 }
122
123 snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
124 return hw->err_str;
125 }
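/* A minimal sketch of how i40e_aq_str() is typically consumed: after an
 * AdminQ command fails, the last AQ return code is translated into a
 * readable string for logging. The surrounding names (pf, ret, mask) are
 * hypothetical and only illustrate the call pattern:
 *
 *	ret = i40e_aq_set_phy_int_mask(&pf->hw, mask, NULL);
 *	if (ret)
 *		dev_warn(&pf->pdev->dev, "set phy mask failed, err %s\n",
 *			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 */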
126
127 /**
128 * i40e_debug_aq
129 * @hw: pointer to the hw struct
130 * @mask: debug mask
131 * @desc: pointer to admin queue descriptor
132 * @buffer: pointer to command buffer
133 * @buf_len: max length of buffer
134 *
135 * Dumps debug log about adminq command with descriptor contents.
136 **/
137 void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
138 void *buffer, u16 buf_len)
139 {
140 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
141 u32 effective_mask = hw->debug_mask & mask;
142 char prefix[27];
143 u16 len;
144 u8 *buf = (u8 *)buffer;
145
146 if (!effective_mask || !desc)
147 return;
148
149 len = le16_to_cpu(aq_desc->datalen);
150
151 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
152 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
153 le16_to_cpu(aq_desc->opcode),
154 le16_to_cpu(aq_desc->flags),
155 le16_to_cpu(aq_desc->datalen),
156 le16_to_cpu(aq_desc->retval));
157 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
158 "\tcookie (h,l) 0x%08X 0x%08X\n",
159 le32_to_cpu(aq_desc->cookie_high),
160 le32_to_cpu(aq_desc->cookie_low));
161 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
162 "\tparam (0,1) 0x%08X 0x%08X\n",
163 le32_to_cpu(aq_desc->params.internal.param0),
164 le32_to_cpu(aq_desc->params.internal.param1));
165 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
166 "\taddr (h,l) 0x%08X 0x%08X\n",
167 le32_to_cpu(aq_desc->params.external.addr_high),
168 le32_to_cpu(aq_desc->params.external.addr_low));
169
170 if (buffer && buf_len != 0 && len != 0 &&
171 (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
172 i40e_debug(hw, mask, "AQ CMD Buffer:\n");
173 if (buf_len < len)
174 len = buf_len;
175
176 snprintf(prefix, sizeof(prefix),
177 "i40e %02x:%02x.%x: \t0x",
178 hw->bus.bus_id,
179 hw->bus.device,
180 hw->bus.func);
181
182 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
183 16, 1, buf, len, false);
184 }
185 }
186
187 /**
188 * i40e_check_asq_alive
189 * @hw: pointer to the hw struct
190 *
191 * Returns true if the send queue (ASQ) is enabled, else false.
192 **/
193 bool i40e_check_asq_alive(struct i40e_hw *hw)
194 {
195 if (hw->aq.asq.len)
196 return !!(rd32(hw, hw->aq.asq.len) &
197 I40E_PF_ATQLEN_ATQENABLE_MASK);
198 else
199 return false;
200 }
201
202 /**
203 * i40e_aq_queue_shutdown
204 * @hw: pointer to the hw struct
205 * @unloading: is the driver unloading itself
206 *
207 * Tell the Firmware that we're shutting down the AdminQ and whether
208 * or not the driver is unloading as well.
209 **/
210 int i40e_aq_queue_shutdown(struct i40e_hw *hw,
211 bool unloading)
212 {
213 struct i40e_aq_desc desc;
214 struct i40e_aqc_queue_shutdown *cmd =
215 (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
216 int status;
217
218 i40e_fill_default_direct_cmd_desc(&desc,
219 i40e_aqc_opc_queue_shutdown);
220
221 if (unloading)
222 cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
223 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
224
225 return status;
226 }
227
228 /**
229 * i40e_aq_get_set_rss_lut
230 * @hw: pointer to the hardware structure
231 * @vsi_id: vsi fw index
232 * @pf_lut: for PF table set true, for VSI table set false
233 * @lut: pointer to the lut buffer provided by the caller
234 * @lut_size: size of the lut buffer
235 * @set: set true to set the table, false to get the table
236 *
237 * Internal function to get or set RSS look up table
238 **/
239 static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
240 u16 vsi_id, bool pf_lut,
241 u8 *lut, u16 lut_size,
242 bool set)
243 {
244 struct i40e_aq_desc desc;
245 struct i40e_aqc_get_set_rss_lut *cmd_resp =
246 (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
247 int status;
248
249 if (set)
250 i40e_fill_default_direct_cmd_desc(&desc,
251 i40e_aqc_opc_set_rss_lut);
252 else
253 i40e_fill_default_direct_cmd_desc(&desc,
254 i40e_aqc_opc_get_rss_lut);
255
256 /* Indirect command */
257 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
258 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
259
260 cmd_resp->vsi_id =
261 cpu_to_le16((u16)((vsi_id <<
262 I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
263 I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
264 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
265
266 if (pf_lut)
267 cmd_resp->flags |= cpu_to_le16((u16)
268 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
269 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
270 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
271 else
272 cmd_resp->flags |= cpu_to_le16((u16)
273 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
274 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
275 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
276
277 status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
278
279 return status;
280 }
281
282 /**
283 * i40e_aq_get_rss_lut
284 * @hw: pointer to the hardware structure
285 * @vsi_id: vsi fw index
286 * @pf_lut: for PF table set true, for VSI table set false
287 * @lut: pointer to the lut buffer provided by the caller
288 * @lut_size: size of the lut buffer
289 *
290 * get the RSS lookup table, PF or VSI type
291 **/
292 int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
293 bool pf_lut, u8 *lut, u16 lut_size)
294 {
295 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
296 false);
297 }
298
299 /**
300 * i40e_aq_set_rss_lut
301 * @hw: pointer to the hardware structure
302 * @vsi_id: vsi fw index
303 * @pf_lut: for PF table set true, for VSI table set false
304 * @lut: pointer to the lut buffer provided by the caller
305 * @lut_size: size of the lut buffer
306 *
307 * set the RSS lookup table, PF or VSI type
308 **/
309 int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
310 bool pf_lut, u8 *lut, u16 lut_size)
311 {
312 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
313 }
314
315 /**
316 * i40e_aq_get_set_rss_key
317 * @hw: pointer to the hw struct
318 * @vsi_id: vsi fw index
319 * @key: pointer to key info struct
320 * @set: set true to set the key, false to get the key
321 *
322 * Internal function to get or set the RSS key per VSI
323 **/
324 static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
325 u16 vsi_id,
326 struct i40e_aqc_get_set_rss_key_data *key,
327 bool set)
328 {
329 struct i40e_aq_desc desc;
330 struct i40e_aqc_get_set_rss_key *cmd_resp =
331 (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
332 u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
333 int status;
334
335 if (set)
336 i40e_fill_default_direct_cmd_desc(&desc,
337 i40e_aqc_opc_set_rss_key);
338 else
339 i40e_fill_default_direct_cmd_desc(&desc,
340 i40e_aqc_opc_get_rss_key);
341
342 /* Indirect command */
343 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
344 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
345
346 cmd_resp->vsi_id =
347 cpu_to_le16((u16)((vsi_id <<
348 I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
349 I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
350 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
351
352 status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
353
354 return status;
355 }
356
357 /**
358 * i40e_aq_get_rss_key
359 * @hw: pointer to the hw struct
360 * @vsi_id: vsi fw index
361 * @key: pointer to key info struct
362 * get the RSS key per VSI
363 **/
364 int i40e_aq_get_rss_key(struct i40e_hw *hw,
365 u16 vsi_id,
366 struct i40e_aqc_get_set_rss_key_data *key)
367 {
368 return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
369 }
370
371 /**
372 * i40e_aq_set_rss_key
373 * @hw: pointer to the hw struct
374 * @vsi_id: vsi fw index
375 * @key: pointer to key info struct
376 *
377 * set the RSS key per VSI
378 **/
379 int i40e_aq_set_rss_key(struct i40e_hw *hw,
380 u16 vsi_id,
381 struct i40e_aqc_get_set_rss_key_data *key)
382 {
383 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
384 }
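/* Example call pattern for the RSS wrappers above, assuming the caller
 * already has a VSI firmware index, a LUT buffer and a key structure
 * (vsi_id, lut, lut_size and key_data are hypothetical locals):
 *
 *	ret = i40e_aq_set_rss_key(hw, vsi_id, &key_data);
 *	if (!ret)
 *		ret = i40e_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
 *
 * Passing pf_lut == false programs the per-VSI table; true targets the
 * PF-wide table instead.
 */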
385
386 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
387 * hardware to a bit-field that can be used by SW to more easily determine the
388 * packet type.
389 *
390 * Macros are used to shorten the table lines and make this table human
391 * readable.
392 *
393 * We store the PTYPE in the top byte of the bit field - this is just so that
394 * we can check that the table doesn't have a row missing, as the index into
395 * the table should be the PTYPE.
396 *
397 * Typical work flow:
398 *
399 * IF NOT i40e_ptype_lookup[ptype].known
400 * THEN
401 * Packet is unknown
402 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
403 * Use the rest of the fields to look at the tunnels, inner protocols, etc
404 * ELSE
405 * Use the enum i40e_rx_l2_ptype to decode the packet type
406 * ENDIF
407 */
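/* A hedged sketch of the work flow above, using the bit-field names from
 * struct i40e_rx_ptype_decoded (rx_ptype is a hypothetical 8-bit value
 * taken from the Rx descriptor):
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[rx_ptype];
 *
 *	if (!decoded.known)
 *		return;					// unknown packet
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
 *		// IP case: inspect decoded.outer_ip_ver,
 *		// decoded.tunnel_type, decoded.inner_prot, ...
 *	} else {
 *		// L2 case: decode via enum i40e_rx_l2_ptype
 *	}
 */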
408
409 /* macro to make the table lines short, use explicit indexing with [PTYPE] */
410 #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
411 [PTYPE] = { \
412 1, \
413 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
414 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
415 I40E_RX_PTYPE_##OUTER_FRAG, \
416 I40E_RX_PTYPE_TUNNEL_##T, \
417 I40E_RX_PTYPE_TUNNEL_END_##TE, \
418 I40E_RX_PTYPE_##TEF, \
419 I40E_RX_PTYPE_INNER_PROT_##I, \
420 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
421
422 #define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
423
424 /* shorter macros makes the table fit but are terse */
425 #define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
426 #define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
427 #define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
428
429 /* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
430 struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
431 /* L2 Packet types */
432 I40E_PTT_UNUSED_ENTRY(0),
433 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
434 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
435 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
436 I40E_PTT_UNUSED_ENTRY(4),
437 I40E_PTT_UNUSED_ENTRY(5),
438 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
439 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
440 I40E_PTT_UNUSED_ENTRY(8),
441 I40E_PTT_UNUSED_ENTRY(9),
442 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
443 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
444 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
445 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
446 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
447 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
448 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
449 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
450 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
451 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
452 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
453 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
454
455 /* Non Tunneled IPv4 */
456 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
457 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
458 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
459 I40E_PTT_UNUSED_ENTRY(25),
460 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
461 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
462 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
463
464 /* IPv4 --> IPv4 */
465 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
466 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
467 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
468 I40E_PTT_UNUSED_ENTRY(32),
469 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
470 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
471 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
472
473 /* IPv4 --> IPv6 */
474 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
475 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
476 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
477 I40E_PTT_UNUSED_ENTRY(39),
478 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
479 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
480 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
481
482 /* IPv4 --> GRE/NAT */
483 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
484
485 /* IPv4 --> GRE/NAT --> IPv4 */
486 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
487 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
488 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
489 I40E_PTT_UNUSED_ENTRY(47),
490 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
491 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
492 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
493
494 /* IPv4 --> GRE/NAT --> IPv6 */
495 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
496 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
497 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
498 I40E_PTT_UNUSED_ENTRY(54),
499 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
500 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
501 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
502
503 /* IPv4 --> GRE/NAT --> MAC */
504 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
505
506 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
507 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
508 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
509 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
510 I40E_PTT_UNUSED_ENTRY(62),
511 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
512 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
513 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
514
515 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
516 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
517 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
518 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
519 I40E_PTT_UNUSED_ENTRY(69),
520 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
521 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
522 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
523
524 /* IPv4 --> GRE/NAT --> MAC/VLAN */
525 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
526
527 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
528 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
529 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
530 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
531 I40E_PTT_UNUSED_ENTRY(77),
532 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
533 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
534 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
535
536 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
537 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
538 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
539 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
540 I40E_PTT_UNUSED_ENTRY(84),
541 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
542 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
543 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
544
545 /* Non Tunneled IPv6 */
546 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
547 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
548 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
549 I40E_PTT_UNUSED_ENTRY(91),
550 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
551 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
552 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
553
554 /* IPv6 --> IPv4 */
555 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
556 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
557 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
558 I40E_PTT_UNUSED_ENTRY(98),
559 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
560 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
561 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
562
563 /* IPv6 --> IPv6 */
564 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
565 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
566 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
567 I40E_PTT_UNUSED_ENTRY(105),
568 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
569 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
570 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
571
572 /* IPv6 --> GRE/NAT */
573 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
574
575 /* IPv6 --> GRE/NAT -> IPv4 */
576 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
577 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
578 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
579 I40E_PTT_UNUSED_ENTRY(113),
580 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
581 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
582 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
583
584 /* IPv6 --> GRE/NAT -> IPv6 */
585 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
586 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
587 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
588 I40E_PTT_UNUSED_ENTRY(120),
589 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
590 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
591 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
592
593 /* IPv6 --> GRE/NAT -> MAC */
594 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
595
596 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
597 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
598 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
599 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
600 I40E_PTT_UNUSED_ENTRY(128),
601 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
602 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
603 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
604
605 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
606 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
607 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
608 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
609 I40E_PTT_UNUSED_ENTRY(135),
610 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
611 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
612 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
613
614 /* IPv6 --> GRE/NAT -> MAC/VLAN */
615 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
616
617 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
618 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
619 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
620 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
621 I40E_PTT_UNUSED_ENTRY(143),
622 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
623 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
624 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
625
626 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
627 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
628 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
629 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
630 I40E_PTT_UNUSED_ENTRY(150),
631 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
632 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
633 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
634
635 /* unused entries */
636 [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
637 };
638
639 /**
640 * i40e_init_shared_code - Initialize the shared code
641 * @hw: pointer to hardware structure
642 *
643 * This assigns the MAC type and PHY code and inits the NVM.
644 * Does not touch the hardware. This function must be called prior to any
645 * other function in the shared code. The i40e_hw structure should be
646 * memset to 0 prior to calling this function. The following fields in
647 * hw structure should be filled in prior to calling this function:
648 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
649 * subsystem_vendor_id, and revision_id
650 **/
651 int i40e_init_shared_code(struct i40e_hw *hw)
652 {
653 u32 port, ari, func_rid;
654 int status = 0;
655
656 i40e_set_mac_type(hw);
657
658 switch (hw->mac.type) {
659 case I40E_MAC_XL710:
660 case I40E_MAC_X722:
661 break;
662 default:
663 return -ENODEV;
664 }
665
666 hw->phy.get_link_info = true;
667
668 /* Determine port number and PF number */
669 port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
670 >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
671 hw->port = (u8)port;
672 ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
673 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
674 func_rid = rd32(hw, I40E_PF_FUNC_RID);
675 if (ari)
676 hw->pf_id = (u8)(func_rid & 0xff);
677 else
678 hw->pf_id = (u8)(func_rid & 0x7);
679
680 status = i40e_init_nvm(hw);
681 return status;
682 }
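/* A sketch of the probe-time sequence the kernel-doc above implies; pdev
 * and err are hypothetical and stand in for whatever the caller uses to
 * reach its PCI device and error path:
 *
 *	memset(hw, 0, sizeof(*hw));
 *	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
 *			      pci_resource_len(pdev, 0));
 *	hw->vendor_id = pdev->vendor;
 *	hw->device_id = pdev->device;
 *	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 *	hw->subsystem_device_id = pdev->subsystem_device;
 *	hw->revision_id = pdev->revision;
 *	err = i40e_init_shared_code(hw);
 */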
683
684 /**
685 * i40e_aq_mac_address_read - Retrieve the MAC addresses
686 * @hw: pointer to the hw struct
687 * @flags: a return indicator of what addresses were added to the addr store
688 * @addrs: the requestor's mac addr store
689 * @cmd_details: pointer to command details structure or NULL
690 **/
691 static int
692 i40e_aq_mac_address_read(struct i40e_hw *hw,
693 u16 *flags,
694 struct i40e_aqc_mac_address_read_data *addrs,
695 struct i40e_asq_cmd_details *cmd_details)
696 {
697 struct i40e_aq_desc desc;
698 struct i40e_aqc_mac_address_read *cmd_data =
699 (struct i40e_aqc_mac_address_read *)&desc.params.raw;
700 int status;
701
702 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
703 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
704
705 status = i40e_asq_send_command(hw, &desc, addrs,
706 sizeof(*addrs), cmd_details);
707 *flags = le16_to_cpu(cmd_data->command_flags);
708
709 return status;
710 }
711
712 /**
713 * i40e_aq_mac_address_write - Change the MAC addresses
714 * @hw: pointer to the hw struct
715 * @flags: indicates which MAC to be written
716 * @mac_addr: address to write
717 * @cmd_details: pointer to command details structure or NULL
718 **/
719 int i40e_aq_mac_address_write(struct i40e_hw *hw,
720 u16 flags, u8 *mac_addr,
721 struct i40e_asq_cmd_details *cmd_details)
722 {
723 struct i40e_aq_desc desc;
724 struct i40e_aqc_mac_address_write *cmd_data =
725 (struct i40e_aqc_mac_address_write *)&desc.params.raw;
726 int status;
727
728 i40e_fill_default_direct_cmd_desc(&desc,
729 i40e_aqc_opc_mac_address_write);
730 cmd_data->command_flags = cpu_to_le16(flags);
731 cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
732 cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
733 ((u32)mac_addr[3] << 16) |
734 ((u32)mac_addr[4] << 8) |
735 mac_addr[5]);
736
737 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
738
739 return status;
740 }
741
742 /**
743 * i40e_get_mac_addr - get MAC address
744 * @hw: pointer to the HW structure
745 * @mac_addr: pointer to MAC address
746 *
747 * Reads the adapter's MAC address from register
748 **/
749 int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
750 {
751 struct i40e_aqc_mac_address_read_data addrs;
752 u16 flags = 0;
753 int status;
754
755 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
756
757 if (flags & I40E_AQC_LAN_ADDR_VALID)
758 ether_addr_copy(mac_addr, addrs.pf_lan_mac);
759
760 return status;
761 }
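/* Typical consumption of i40e_get_mac_addr(): read the LAN address into a
 * local buffer and validate it before handing it to the netdev layer
 * (addr and err are hypothetical locals):
 *
 *	u8 addr[ETH_ALEN];
 *
 *	err = i40e_get_mac_addr(hw, addr);
 *	if (!err && is_valid_ether_addr(addr))
 *		ether_addr_copy(hw->mac.addr, addr);
 */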
762
763 /**
764 * i40e_get_port_mac_addr - get Port MAC address
765 * @hw: pointer to the HW structure
766 * @mac_addr: pointer to Port MAC address
767 *
768 * Reads the adapter's Port MAC address
769 **/
770 int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
771 {
772 struct i40e_aqc_mac_address_read_data addrs;
773 u16 flags = 0;
774 int status;
775
776 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
777 if (status)
778 return status;
779
780 if (flags & I40E_AQC_PORT_ADDR_VALID)
781 ether_addr_copy(mac_addr, addrs.port_mac);
782 else
783 status = -EINVAL;
784
785 return status;
786 }
787
788 /**
789 * i40e_pre_tx_queue_cfg - pre tx queue configure
790 * @hw: pointer to the HW structure
791 * @queue: target PF queue index
792 * @enable: state change request
793 *
794 * Handles hw requirement to indicate intention to enable
795 * or disable target queue.
796 **/
797 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
798 {
799 u32 abs_queue_idx = hw->func_caps.base_queue + queue;
800 u32 reg_block = 0;
801 u32 reg_val;
802
803 if (abs_queue_idx >= 128) {
804 reg_block = abs_queue_idx / 128;
805 abs_queue_idx %= 128;
806 }
807
808 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
809 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
810 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
811
812 if (enable)
813 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
814 else
815 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
816
817 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
818 }
819
820 /**
821 * i40e_read_pba_string - Reads part number string from EEPROM
822 * @hw: pointer to hardware structure
823 * @pba_num: stores the part number string from the EEPROM
824 * @pba_num_size: part number string buffer length
825 *
826 * Reads the part number string from the EEPROM.
827 **/
828 int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
829 u32 pba_num_size)
830 {
831 u16 pba_word = 0;
832 u16 pba_size = 0;
833 u16 pba_ptr = 0;
834 int status = 0;
835 u16 i = 0;
836
837 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
838 if (status || (pba_word != 0xFAFA)) {
839 hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
840 return status;
841 }
842
843 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
844 if (status) {
845 hw_dbg(hw, "Failed to read PBA Block pointer.\n");
846 return status;
847 }
848
849 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
850 if (status) {
851 hw_dbg(hw, "Failed to read PBA Block size.\n");
852 return status;
853 }
854
855 /* Subtract one to get PBA word count (PBA Size word is included in
856 * total size)
857 */
858 pba_size--;
859 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
860 hw_dbg(hw, "Buffer too small for PBA data.\n");
861 return -EINVAL;
862 }
863
864 for (i = 0; i < pba_size; i++) {
865 status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
866 if (status) {
867 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
868 return status;
869 }
870
871 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
872 pba_num[(i * 2) + 1] = pba_word & 0xFF;
873 }
874 pba_num[(pba_size * 2)] = '\0';
875
876 return status;
877 }
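/* Buffer sizing note for i40e_read_pba_string(): the PBA block holds
 * pba_size 16-bit words, so the caller must supply at least
 * (2 * pba_size + 1) bytes for the string and its terminator. A hedged
 * example with a hypothetical fixed-size buffer and device pointer:
 *
 *	u8 pba[32];
 *
 *	if (!i40e_read_pba_string(hw, pba, sizeof(pba)))
 *		dev_info(dev, "PBA: %s\n", pba);
 */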
878
879 /**
880 * i40e_get_media_type - Gets media type
881 * @hw: pointer to the hardware structure
882 **/
883 static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
884 {
885 enum i40e_media_type media;
886
887 switch (hw->phy.link_info.phy_type) {
888 case I40E_PHY_TYPE_10GBASE_SR:
889 case I40E_PHY_TYPE_10GBASE_LR:
890 case I40E_PHY_TYPE_1000BASE_SX:
891 case I40E_PHY_TYPE_1000BASE_LX:
892 case I40E_PHY_TYPE_40GBASE_SR4:
893 case I40E_PHY_TYPE_40GBASE_LR4:
894 case I40E_PHY_TYPE_25GBASE_LR:
895 case I40E_PHY_TYPE_25GBASE_SR:
896 media = I40E_MEDIA_TYPE_FIBER;
897 break;
898 case I40E_PHY_TYPE_100BASE_TX:
899 case I40E_PHY_TYPE_1000BASE_T:
900 case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
901 case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
902 case I40E_PHY_TYPE_10GBASE_T:
903 media = I40E_MEDIA_TYPE_BASET;
904 break;
905 case I40E_PHY_TYPE_10GBASE_CR1_CU:
906 case I40E_PHY_TYPE_40GBASE_CR4_CU:
907 case I40E_PHY_TYPE_10GBASE_CR1:
908 case I40E_PHY_TYPE_40GBASE_CR4:
909 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
910 case I40E_PHY_TYPE_40GBASE_AOC:
911 case I40E_PHY_TYPE_10GBASE_AOC:
912 case I40E_PHY_TYPE_25GBASE_CR:
913 case I40E_PHY_TYPE_25GBASE_AOC:
914 case I40E_PHY_TYPE_25GBASE_ACC:
915 media = I40E_MEDIA_TYPE_DA;
916 break;
917 case I40E_PHY_TYPE_1000BASE_KX:
918 case I40E_PHY_TYPE_10GBASE_KX4:
919 case I40E_PHY_TYPE_10GBASE_KR:
920 case I40E_PHY_TYPE_40GBASE_KR4:
921 case I40E_PHY_TYPE_20GBASE_KR2:
922 case I40E_PHY_TYPE_25GBASE_KR:
923 media = I40E_MEDIA_TYPE_BACKPLANE;
924 break;
925 case I40E_PHY_TYPE_SGMII:
926 case I40E_PHY_TYPE_XAUI:
927 case I40E_PHY_TYPE_XFI:
928 case I40E_PHY_TYPE_XLAUI:
929 case I40E_PHY_TYPE_XLPPI:
930 default:
931 media = I40E_MEDIA_TYPE_UNKNOWN;
932 break;
933 }
934
935 return media;
936 }
937
938 /**
939 * i40e_poll_globr - Poll for Global Reset completion
940 * @hw: pointer to the hardware structure
941 * @retry_limit: how many times to retry before failure
942 **/
943 static int i40e_poll_globr(struct i40e_hw *hw,
944 u32 retry_limit)
945 {
946 u32 cnt, reg = 0;
947
948 for (cnt = 0; cnt < retry_limit; cnt++) {
949 reg = rd32(hw, I40E_GLGEN_RSTAT);
950 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
951 return 0;
952 msleep(100);
953 }
954
955 hw_dbg(hw, "Global reset failed.\n");
956 hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
957
958 return -EIO;
959 }
960
961 #define I40E_PF_RESET_WAIT_COUNT_A0 200
962 #define I40E_PF_RESET_WAIT_COUNT 200
963 /**
964 * i40e_pf_reset - Reset the PF
965 * @hw: pointer to the hardware structure
966 *
967 * Assuming someone else has triggered a global reset,
968 * assure the global reset is complete and then reset the PF
969 **/
970 int i40e_pf_reset(struct i40e_hw *hw)
971 {
972 u32 cnt = 0;
973 u32 cnt1 = 0;
974 u32 reg = 0;
975 u32 grst_del;
976
977 /* Poll for Global Reset steady state in case of recent GRST.
978 * The grst delay value is in 100ms units, and we'll wait a
979 * couple counts longer to be sure we don't just miss the end.
980 */
981 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
982 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
983 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
984
985 /* It can take up to 15 secs for GRST steady state.
986 * Bump it to 16 secs max to be safe.
987 */
988 grst_del = grst_del * 20;
989
990 for (cnt = 0; cnt < grst_del; cnt++) {
991 reg = rd32(hw, I40E_GLGEN_RSTAT);
992 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
993 break;
994 msleep(100);
995 }
996 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
997 hw_dbg(hw, "Global reset polling failed to complete.\n");
998 return -EIO;
999 }
1000
1001 /* Now Wait for the FW to be ready */
1002 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1003 reg = rd32(hw, I40E_GLNVM_ULD);
1004 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1005 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1006 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1007 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1008 hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1009 break;
1010 }
1011 usleep_range(10000, 20000);
1012 }
1013 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1014 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
1015 hw_dbg(hw, "wait for FW Reset complete timedout\n");
1016 hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1017 return -EIO;
1018 }
1019
1020 /* If there was a Global Reset in progress when we got here,
1021 * we don't need to do the PF Reset
1022 */
1023 if (!cnt) {
1024 u32 reg2 = 0;
1025 if (hw->revision_id == 0)
1026 cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1027 else
1028 cnt = I40E_PF_RESET_WAIT_COUNT;
1029 reg = rd32(hw, I40E_PFGEN_CTRL);
1030 wr32(hw, I40E_PFGEN_CTRL,
1031 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1032 for (; cnt; cnt--) {
1033 reg = rd32(hw, I40E_PFGEN_CTRL);
1034 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1035 break;
1036 reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1037 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1038 break;
1039 usleep_range(1000, 2000);
1040 }
1041 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1042 if (i40e_poll_globr(hw, grst_del))
1043 return -EIO;
1044 } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1045 hw_dbg(hw, "PF reset polling failed to complete.\n");
1046 return -EIO;
1047 }
1048 }
1049
1050 i40e_clear_pxe_mode(hw);
1051
1052 return 0;
1053 }
1054
1055 /**
1056 * i40e_clear_hw - clear out any left over hw state
1057 * @hw: pointer to the hw struct
1058 *
1059 * Clear queues and interrupts, typically called at init time,
1060 * but after the capabilities have been found so we know how many
1061 * queues and msix vectors have been allocated.
1062 **/
1063 void i40e_clear_hw(struct i40e_hw *hw)
1064 {
1065 u32 num_queues, base_queue;
1066 u32 num_pf_int;
1067 u32 num_vf_int;
1068 u32 num_vfs;
1069 u32 i, j;
1070 u32 val;
1071 u32 eol = 0x7ff;
1072
1073 /* get number of interrupts, queues, and VFs */
1074 val = rd32(hw, I40E_GLPCI_CNF2);
1075 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1076 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1077 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
1078 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
1079
1080 val = rd32(hw, I40E_PFLAN_QALLOC);
1081 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1082 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1083 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
1084 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
1085 if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
1086 num_queues = (j - base_queue) + 1;
1087 else
1088 num_queues = 0;
1089
1090 val = rd32(hw, I40E_PF_VT_PFALLOC);
1091 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
1092 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
1093 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
1094 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
1095 if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
1096 num_vfs = (j - i) + 1;
1097 else
1098 num_vfs = 0;
1099
1100 /* stop all the interrupts */
1101 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1102 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1103 for (i = 0; i < num_pf_int - 2; i++)
1104 wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1105
1106 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1107 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1108 wr32(hw, I40E_PFINT_LNKLST0, val);
1109 for (i = 0; i < num_pf_int - 2; i++)
1110 wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1111 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1112 for (i = 0; i < num_vfs; i++)
1113 wr32(hw, I40E_VPINT_LNKLST0(i), val);
1114 for (i = 0; i < num_vf_int - 2; i++)
1115 wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1116
1117 /* warn the HW of the coming Tx disables */
1118 for (i = 0; i < num_queues; i++) {
1119 u32 abs_queue_idx = base_queue + i;
1120 u32 reg_block = 0;
1121
1122 if (abs_queue_idx >= 128) {
1123 reg_block = abs_queue_idx / 128;
1124 abs_queue_idx %= 128;
1125 }
1126
1127 val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1128 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1129 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1130 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1131
1132 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1133 }
1134 udelay(400);
1135
1136 /* stop all the queues */
1137 for (i = 0; i < num_queues; i++) {
1138 wr32(hw, I40E_QINT_TQCTL(i), 0);
1139 wr32(hw, I40E_QTX_ENA(i), 0);
1140 wr32(hw, I40E_QINT_RQCTL(i), 0);
1141 wr32(hw, I40E_QRX_ENA(i), 0);
1142 }
1143
1144 /* short wait for all queue disables to settle */
1145 udelay(50);
1146 }
1147
1148 /**
1149 * i40e_clear_pxe_mode - clear pxe operations mode
1150 * @hw: pointer to the hw struct
1151 *
1152 * Make sure all PXE mode settings are cleared, including things
1153 * like descriptor fetch/write-back mode.
1154 **/
1155 void i40e_clear_pxe_mode(struct i40e_hw *hw)
1156 {
1157 u32 reg;
1158
1159 if (i40e_check_asq_alive(hw))
1160 i40e_aq_clear_pxe_mode(hw, NULL);
1161
1162 /* Clear single descriptor fetch/write-back mode */
1163 reg = rd32(hw, I40E_GLLAN_RCTL_0);
1164
1165 if (hw->revision_id == 0) {
1166 /* As a workaround, clear PXE_MODE instead of setting it */
1167 wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1168 } else {
1169 wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1170 }
1171 }
1172
1173 /**
1174 * i40e_led_is_mine - helper to find matching led
1175 * @hw: pointer to the hw struct
1176 * @idx: index into GPIO registers
1177 *
1178 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1179 */
1180 static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1181 {
1182 u32 gpio_val = 0;
1183 u32 port;
1184
1185 if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
1186 !hw->func_caps.led[idx])
1187 return 0;
1188 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1189 port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
1190 I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
1191
1192 /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
1193 * if it is not our port then ignore
1194 */
1195 if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1196 (port != hw->port))
1197 return 0;
1198
1199 return gpio_val;
1200 }
1201
1202 #define I40E_FW_LED BIT(4)
1203 #define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
1204 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
1205
1206 #define I40E_LED0 22
1207
1208 #define I40E_PIN_FUNC_SDP 0x0
1209 #define I40E_PIN_FUNC_LED 0x1
1210
1211 /**
1212 * i40e_led_get - return current on/off mode
1213 * @hw: pointer to the hw struct
1214 *
1215 * The value returned is the 'mode' field as defined in the
1216 * GPIO register definitions: 0x0 = off, 0xf = on, and other
1217 * values are variations of possible behaviors relating to
1218 * blink, link, and wire.
1219 **/
1220 u32 i40e_led_get(struct i40e_hw *hw)
1221 {
1222 u32 mode = 0;
1223 int i;
1224
1225 /* as per the documentation GPIO 22-29 are the LED
1226 * GPIO pins named LED0..LED7
1227 */
1228 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1229 u32 gpio_val = i40e_led_is_mine(hw, i);
1230
1231 if (!gpio_val)
1232 continue;
1233
1234 mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
1235 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
1236 break;
1237 }
1238
1239 return mode;
1240 }
1241
1242 /**
1243 * i40e_led_set - set new on/off mode
1244 * @hw: pointer to the hw struct
1245 * @mode: 0=off, 0xf=on (else see manual for mode details)
1246 * @blink: true if the LED should blink when on, false if steady
1247 *
1248 * If this function is used to turn on the blink, it should also
1249 * be used to disable the blink when restoring the original state.
1250 **/
1251 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1252 {
1253 int i;
1254
1255 if (mode & ~I40E_LED_MODE_VALID) {
1256 hw_dbg(hw, "invalid mode passed in %X\n", mode);
1257 return;
1258 }
1259
1260 /* as per the documentation GPIO 22-29 are the LED
1261 * GPIO pins named LED0..LED7
1262 */
1263 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1264 u32 gpio_val = i40e_led_is_mine(hw, i);
1265
1266 if (!gpio_val)
1267 continue;
1268
1269 if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
1270 u32 pin_func = 0;
1271
1272 if (mode & I40E_FW_LED)
1273 pin_func = I40E_PIN_FUNC_SDP;
1274 else
1275 pin_func = I40E_PIN_FUNC_LED;
1276
1277 gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
1278 gpio_val |= ((pin_func <<
1279 I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
1280 I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
1281 }
1282 gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1283 /* this & is a bit of paranoia, but serves as a range check */
1284 gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1285 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1286
1287 if (blink)
1288 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1289 else
1290 gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1291
1292 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1293 break;
1294 }
1295 }
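/* Sketch of the identify-LED pattern the kernel-doc above describes: save
 * the current mode, blink while identifying, then restore with blink
 * cleared (orig_mode is a hypothetical local):
 *
 *	orig_mode = i40e_led_get(hw);
 *	i40e_led_set(hw, 0xf, true);		// on, blinking
 *	// ... identification period ...
 *	i40e_led_set(hw, orig_mode, false);	// restore, blink off
 */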
1296
1297 /* Admin command wrappers */
1298
1299 /**
1300 * i40e_aq_get_phy_capabilities
1301 * @hw: pointer to the hw struct
1302 * @abilities: structure for PHY capabilities to be filled
1303 * @qualified_modules: report Qualified Modules
1304 * @report_init: report init capabilities (active are default)
1305 * @cmd_details: pointer to command details structure or NULL
1306 *
1307 * Returns the various PHY abilities supported on the Port.
1308 **/
1309 int
1310 i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1311 bool qualified_modules, bool report_init,
1312 struct i40e_aq_get_phy_abilities_resp *abilities,
1313 struct i40e_asq_cmd_details *cmd_details)
1314 {
1315 u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1316 u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1317 struct i40e_aq_desc desc;
1318 int status;
1319
1320 if (!abilities)
1321 return -EINVAL;
1322
1323 do {
1324 i40e_fill_default_direct_cmd_desc(&desc,
1325 i40e_aqc_opc_get_phy_abilities);
1326
1327 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1328 if (abilities_size > I40E_AQ_LARGE_BUF)
1329 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1330
1331 if (qualified_modules)
1332 desc.params.external.param0 |=
1333 cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1334
1335 if (report_init)
1336 desc.params.external.param0 |=
1337 cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1338
1339 status = i40e_asq_send_command(hw, &desc, abilities,
1340 abilities_size, cmd_details);
1341
1342 switch (hw->aq.asq_last_status) {
1343 case I40E_AQ_RC_EIO:
1344 status = -EIO;
1345 break;
1346 case I40E_AQ_RC_EAGAIN:
1347 usleep_range(1000, 2000);
1348 total_delay++;
1349 status = -EIO;
1350 break;
1351 /* also covers I40E_AQ_RC_OK */
1352 default:
1353 break;
1354 }
1355
1356 } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
1357 (total_delay < max_delay));
1358
1359 if (status)
1360 return status;
1361
1362 if (report_init) {
1363 if (hw->mac.type == I40E_MAC_XL710 &&
1364 hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1365 hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
1366 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1367 } else {
1368 hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1369 hw->phy.phy_types |=
1370 ((u64)abilities->phy_type_ext << 32);
1371 }
1372 }
1373
1374 return status;
1375 }
1376
1377 /**
1378 * i40e_aq_set_phy_config
1379 * @hw: pointer to the hw struct
1380 * @config: structure with PHY configuration to be set
1381 * @cmd_details: pointer to command details structure or NULL
1382 *
1383 * Set the various PHY configuration parameters
1384 * supported on the Port. One or more of the Set PHY config parameters may be
1385 * ignored in an MFP mode as the PF may not have the privilege to set some
1386 * of the PHY Config parameters. This status will be indicated by the
1387 * command response.
1388 **/
1389 int i40e_aq_set_phy_config(struct i40e_hw *hw,
1390 struct i40e_aq_set_phy_config *config,
1391 struct i40e_asq_cmd_details *cmd_details)
1392 {
1393 struct i40e_aq_desc desc;
1394 struct i40e_aq_set_phy_config *cmd =
1395 (struct i40e_aq_set_phy_config *)&desc.params.raw;
1396 int status;
1397
1398 if (!config)
1399 return -EINVAL;
1400
1401 i40e_fill_default_direct_cmd_desc(&desc,
1402 i40e_aqc_opc_set_phy_config);
1403
1404 *cmd = *config;
1405
1406 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1407
1408 return status;
1409 }
1410
1411 static noinline_for_stack int
1412 i40e_set_fc_status(struct i40e_hw *hw,
1413 struct i40e_aq_get_phy_abilities_resp *abilities,
1414 bool atomic_restart)
1415 {
1416 struct i40e_aq_set_phy_config config;
1417 enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1418 u8 pause_mask = 0x0;
1419
1420 switch (fc_mode) {
1421 case I40E_FC_FULL:
1422 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1423 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1424 break;
1425 case I40E_FC_RX_PAUSE:
1426 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1427 break;
1428 case I40E_FC_TX_PAUSE:
1429 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1430 break;
1431 default:
1432 break;
1433 }
1434
1435 memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1436 /* clear the old pause settings */
1437 config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1438 ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1439 /* set the new abilities */
1440 config.abilities |= pause_mask;
1441 /* If the abilities have changed, then set the new config */
1442 if (config.abilities == abilities->abilities)
1443 return 0;
1444
1445 /* Auto restart link so settings take effect */
1446 if (atomic_restart)
1447 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1448 /* Copy over all the old settings */
1449 config.phy_type = abilities->phy_type;
1450 config.phy_type_ext = abilities->phy_type_ext;
1451 config.link_speed = abilities->link_speed;
1452 config.eee_capability = abilities->eee_capability;
1453 config.eeer = abilities->eeer_val;
1454 config.low_power_ctrl = abilities->d3_lpan;
1455 config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
1456 I40E_AQ_PHY_FEC_CONFIG_MASK;
1457
1458 return i40e_aq_set_phy_config(hw, &config, NULL);
1459 }
1460
1461 /**
1462 * i40e_set_fc
1463 * @hw: pointer to the hw struct
1464 * @aq_failures: buffer to return AdminQ failure information
1465 * @atomic_restart: whether to enable atomic link restart
1466 *
1467 * Set the requested flow control mode using set_phy_config.
1468 **/
1469 int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1470 bool atomic_restart)
1471 {
1472 struct i40e_aq_get_phy_abilities_resp abilities;
1473 int status;
1474
1475 *aq_failures = 0x0;
1476
1477 /* Get the current phy config */
1478 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1479 NULL);
1480 if (status) {
1481 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1482 return status;
1483 }
1484
1485 status = i40e_set_fc_status(hw, &abilities, atomic_restart);
1486 if (status)
1487 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1488
1489 /* Update the link info */
1490 status = i40e_update_link_info(hw);
1491 if (status) {
1492 /* Wait a little bit (on 40G cards it sometimes takes a really
1493 * long time for link to come back from the atomic reset)
1494 * and try once more
1495 */
1496 msleep(1000);
1497 status = i40e_update_link_info(hw);
1498 }
1499 if (status)
1500 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1501
1502 return status;
1503 }
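/* Example of requesting a flow control change through i40e_set_fc(): set
 * hw->fc.requested_mode first, then inspect aq_failures against the
 * I40E_SET_FC_AQ_FAIL_* bits on error (err and dev are hypothetical):
 *
 *	u8 aq_failures = 0;
 *
 *	hw->fc.requested_mode = I40E_FC_FULL;
 *	err = i40e_set_fc(hw, &aq_failures, true);
 *	if (err)
 *		dev_dbg(dev, "set_fc failed, aq_failures 0x%x\n",
 *			aq_failures);
 */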
1504
1505 /**
1506 * i40e_aq_clear_pxe_mode
1507 * @hw: pointer to the hw struct
1508 * @cmd_details: pointer to command details structure or NULL
1509 *
1510 * Tell the firmware that the driver is taking over from PXE
1511 **/
1512 int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1513 struct i40e_asq_cmd_details *cmd_details)
1514 {
1515 struct i40e_aq_desc desc;
1516 struct i40e_aqc_clear_pxe *cmd =
1517 (struct i40e_aqc_clear_pxe *)&desc.params.raw;
1518 int status;
1519
1520 i40e_fill_default_direct_cmd_desc(&desc,
1521 i40e_aqc_opc_clear_pxe_mode);
1522
1523 cmd->rx_cnt = 0x2;
1524
1525 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1526
1527 wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1528
1529 return status;
1530 }
1531
1532 /**
1533 * i40e_aq_set_link_restart_an
1534 * @hw: pointer to the hw struct
1535 * @enable_link: if true: enable link, if false: disable link
1536 * @cmd_details: pointer to command details structure or NULL
1537 *
1538 * Sets up the link and restarts the Auto-Negotiation over the link.
1539 **/
1540 int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1541 bool enable_link,
1542 struct i40e_asq_cmd_details *cmd_details)
1543 {
1544 struct i40e_aq_desc desc;
1545 struct i40e_aqc_set_link_restart_an *cmd =
1546 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1547 int status;
1548
1549 i40e_fill_default_direct_cmd_desc(&desc,
1550 i40e_aqc_opc_set_link_restart_an);
1551
1552 cmd->command = I40E_AQ_PHY_RESTART_AN;
1553 if (enable_link)
1554 cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1555 else
1556 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1557
1558 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1559
1560 return status;
1561 }
1562
1563 /**
1564 * i40e_aq_get_link_info
1565 * @hw: pointer to the hw struct
1566 * @enable_lse: enable/disable LinkStatusEvent reporting
1567 * @link: pointer to link status structure - optional
1568 * @cmd_details: pointer to command details structure or NULL
1569 *
1570 * Returns the link status of the adapter.
1571 **/
1572 int i40e_aq_get_link_info(struct i40e_hw *hw,
1573 bool enable_lse, struct i40e_link_status *link,
1574 struct i40e_asq_cmd_details *cmd_details)
1575 {
1576 struct i40e_aq_desc desc;
1577 struct i40e_aqc_get_link_status *resp =
1578 (struct i40e_aqc_get_link_status *)&desc.params.raw;
1579 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1580 bool tx_pause, rx_pause;
1581 u16 command_flags;
1582 int status;
1583
1584 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1585
1586 if (enable_lse)
1587 command_flags = I40E_AQ_LSE_ENABLE;
1588 else
1589 command_flags = I40E_AQ_LSE_DISABLE;
1590 resp->command_flags = cpu_to_le16(command_flags);
1591
1592 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1593
1594 if (status)
1595 goto aq_get_link_info_exit;
1596
1597 /* save off old link status information */
1598 hw->phy.link_info_old = *hw_link_info;
1599
1600 /* update link status */
1601 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1602 hw->phy.media_type = i40e_get_media_type(hw);
1603 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1604 hw_link_info->link_info = resp->link_info;
1605 hw_link_info->an_info = resp->an_info;
1606 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1607 I40E_AQ_CONFIG_FEC_RS_ENA);
1608 hw_link_info->ext_info = resp->ext_info;
1609 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1610 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1611 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1612
1613 /* update fc info */
1614 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1615 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
1616 if (tx_pause & rx_pause)
1617 hw->fc.current_mode = I40E_FC_FULL;
1618 else if (tx_pause)
1619 hw->fc.current_mode = I40E_FC_TX_PAUSE;
1620 else if (rx_pause)
1621 hw->fc.current_mode = I40E_FC_RX_PAUSE;
1622 else
1623 hw->fc.current_mode = I40E_FC_NONE;
1624
1625 if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1626 hw_link_info->crc_enable = true;
1627 else
1628 hw_link_info->crc_enable = false;
1629
1630 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1631 hw_link_info->lse_enable = true;
1632 else
1633 hw_link_info->lse_enable = false;
1634
1635 if ((hw->mac.type == I40E_MAC_XL710) &&
1636 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
1637 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1638 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1639
1640 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
1641 hw->mac.type != I40E_MAC_X722) {
1642 __le32 tmp;
1643
1644 memcpy(&tmp, resp->link_type, sizeof(tmp));
1645 hw->phy.phy_types = le32_to_cpu(tmp);
1646 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1647 }
1648
1649 /* save link status information */
1650 if (link)
1651 *link = *hw_link_info;
1652
1653 /* flag cleared so helper functions don't call AQ again */
1654 hw->phy.get_link_info = false;
1655
1656 aq_get_link_info_exit:
1657 return status;
1658 }
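
/* Illustrative usage sketch, not part of the driver: how a caller might use
 * i40e_aq_get_link_info() to refresh and inspect the link state.  The helper
 * name and the stack copy of the link status are assumptions made only for
 * this example.
 */
static int i40e_example_query_link(struct i40e_hw *hw)
{
	struct i40e_link_status link;
	int status;

	/* enable_lse = true also turns on Link Status Event reporting */
	status = i40e_aq_get_link_info(hw, true, &link, NULL);
	if (status)
		return status;

	i40e_debug(hw, I40E_DEBUG_LINK, "link %s, speed 0x%x, fc mode %d\n",
		   (link.link_info & I40E_AQ_LINK_UP) ? "up" : "down",
		   link.link_speed, hw->fc.current_mode);
	return 0;
}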
1659
1660 /**
1661 * i40e_aq_set_phy_int_mask
1662 * @hw: pointer to the hw struct
1663 * @mask: interrupt mask to be set
1664 * @cmd_details: pointer to command details structure or NULL
1665 *
1666 * Set link interrupt mask.
1667 **/
1668 int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1669 u16 mask,
1670 struct i40e_asq_cmd_details *cmd_details)
1671 {
1672 struct i40e_aq_desc desc;
1673 struct i40e_aqc_set_phy_int_mask *cmd =
1674 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1675 int status;
1676
1677 i40e_fill_default_direct_cmd_desc(&desc,
1678 i40e_aqc_opc_set_phy_int_mask);
1679
1680 cmd->event_mask = cpu_to_le16(mask);
1681
1682 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1683
1684 return status;
1685 }
1686
1687 /**
1688 * i40e_aq_set_mac_loopback
1689 * @hw: pointer to the HW struct
1690 * @ena_lpbk: Enable or Disable loopback
1691 * @cmd_details: pointer to command details structure or NULL
1692 *
1693 * Enable/disable loopback on a given port
1694 **/
1695 int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
1696 struct i40e_asq_cmd_details *cmd_details)
1697 {
1698 struct i40e_aq_desc desc;
1699 struct i40e_aqc_set_lb_mode *cmd =
1700 (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
1701
1702 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
1703 if (ena_lpbk) {
1704 if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
1705 cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
1706 else
1707 cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL);
1708 }
1709
1710 return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1711 }
1712
1713 /**
1714 * i40e_aq_set_phy_debug
1715 * @hw: pointer to the hw struct
1716 * @cmd_flags: debug command flags
1717 * @cmd_details: pointer to command details structure or NULL
1718 *
1719 * Set PHY debug command flags (used, for example, to reset the external PHY).
1720 **/
1721 int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1722 struct i40e_asq_cmd_details *cmd_details)
1723 {
1724 struct i40e_aq_desc desc;
1725 struct i40e_aqc_set_phy_debug *cmd =
1726 (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1727 int status;
1728
1729 i40e_fill_default_direct_cmd_desc(&desc,
1730 i40e_aqc_opc_set_phy_debug);
1731
1732 cmd->command_flags = cmd_flags;
1733
1734 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1735
1736 return status;
1737 }
1738
1739 /**
1740 * i40e_is_aq_api_ver_ge
1741 * @aq: pointer to AdminQ info containing HW API version to compare
1742 * @maj: API major value
1743 * @min: API minor value
1744 *
1745 * Check whether the current HW API version is >= the provided version.
1746 **/
1747 static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
1748 u16 min)
1749 {
1750 return (aq->api_maj_ver > maj ||
1751 (aq->api_maj_ver == maj && aq->api_min_ver >= min));
1752 }
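
/* Illustrative sketch, not part of the driver: the version-gating pattern the
 * promiscuous-mode helpers below build on top of i40e_is_aq_api_ver_ge().
 * The helper name and the 1.5 threshold (taken from the RX-only promiscuous
 * handling in this file) are noted here only for the example.
 */
static bool i40e_example_supports_rx_only_promisc(struct i40e_hw *hw)
{
	/* the RX-only promiscuous flag exists from AQ API version 1.5 on */
	return i40e_is_aq_api_ver_ge(&hw->aq, 1, 5);
}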
1753
1754 /**
1755 * i40e_aq_add_vsi
1756 * @hw: pointer to the hw struct
1757 * @vsi_ctx: pointer to a vsi context struct
1758 * @cmd_details: pointer to command details structure or NULL
1759 *
1760 * Add a VSI context to the hardware.
1761 **/
1762 int i40e_aq_add_vsi(struct i40e_hw *hw,
1763 struct i40e_vsi_context *vsi_ctx,
1764 struct i40e_asq_cmd_details *cmd_details)
1765 {
1766 struct i40e_aq_desc desc;
1767 struct i40e_aqc_add_get_update_vsi *cmd =
1768 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1769 struct i40e_aqc_add_get_update_vsi_completion *resp =
1770 (struct i40e_aqc_add_get_update_vsi_completion *)
1771 &desc.params.raw;
1772 int status;
1773
1774 i40e_fill_default_direct_cmd_desc(&desc,
1775 i40e_aqc_opc_add_vsi);
1776
1777 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
1778 cmd->connection_type = vsi_ctx->connection_type;
1779 cmd->vf_id = vsi_ctx->vf_num;
1780 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1781
1782 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1783
1784 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
1785 sizeof(vsi_ctx->info),
1786 cmd_details, true);
1787
1788 if (status)
1789 goto aq_add_vsi_exit;
1790
1791 vsi_ctx->seid = le16_to_cpu(resp->seid);
1792 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
1793 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1794 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1795
1796 aq_add_vsi_exit:
1797 return status;
1798 }
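
/* Illustrative sketch, not part of the driver: a minimal i40e_aq_add_vsi()
 * call.  The connection type, flags and the (empty) vsi_ctx.info contents are
 * placeholders; a real caller fills them according to the VSI type and the
 * current switch configuration.
 */
static int i40e_example_add_vsi(struct i40e_hw *hw, u16 uplink_seid)
{
	struct i40e_vsi_context vsi_ctx = {};
	int status;

	vsi_ctx.uplink_seid = uplink_seid;
	vsi_ctx.connection_type = 0x1;	/* regular data port */
	vsi_ctx.flags = 0;
	/* vsi_ctx.info (valid_sections etc.) would be filled in here */

	status = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
	if (!status)
		hw_dbg(hw, "added VSI: seid %d, vsi_number %d\n",
		       vsi_ctx.seid, vsi_ctx.vsi_number);
	return status;
}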
1799
1800 /**
1801 * i40e_aq_set_default_vsi
1802 * @hw: pointer to the hw struct
1803 * @seid: vsi number
1804 * @cmd_details: pointer to command details structure or NULL
1805 **/
1806 int i40e_aq_set_default_vsi(struct i40e_hw *hw,
1807 u16 seid,
1808 struct i40e_asq_cmd_details *cmd_details)
1809 {
1810 struct i40e_aq_desc desc;
1811 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1812 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1813 &desc.params.raw;
1814 int status;
1815
1816 i40e_fill_default_direct_cmd_desc(&desc,
1817 i40e_aqc_opc_set_vsi_promiscuous_modes);
1818
1819 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1820 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1821 cmd->seid = cpu_to_le16(seid);
1822
1823 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1824
1825 return status;
1826 }
1827
1828 /**
1829 * i40e_aq_clear_default_vsi
1830 * @hw: pointer to the hw struct
1831 * @seid: vsi number
1832 * @cmd_details: pointer to command details structure or NULL
1833 **/
1834 int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
1835 u16 seid,
1836 struct i40e_asq_cmd_details *cmd_details)
1837 {
1838 struct i40e_aq_desc desc;
1839 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1840 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1841 &desc.params.raw;
1842 int status;
1843
1844 i40e_fill_default_direct_cmd_desc(&desc,
1845 i40e_aqc_opc_set_vsi_promiscuous_modes);
1846
1847 cmd->promiscuous_flags = cpu_to_le16(0);
1848 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1849 cmd->seid = cpu_to_le16(seid);
1850
1851 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1852
1853 return status;
1854 }
1855
1856 /**
1857 * i40e_aq_set_vsi_unicast_promiscuous
1858 * @hw: pointer to the hw struct
1859 * @seid: vsi number
1860 * @set: set unicast promiscuous enable/disable
1861 * @cmd_details: pointer to command details structure or NULL
1862 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promiscuous mode
1863 **/
1864 int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
1865 u16 seid, bool set,
1866 struct i40e_asq_cmd_details *cmd_details,
1867 bool rx_only_promisc)
1868 {
1869 struct i40e_aq_desc desc;
1870 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1871 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1872 u16 flags = 0;
1873 int status;
1874
1875 i40e_fill_default_direct_cmd_desc(&desc,
1876 i40e_aqc_opc_set_vsi_promiscuous_modes);
1877
1878 if (set) {
1879 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1880 if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1881 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1882 }
1883
1884 cmd->promiscuous_flags = cpu_to_le16(flags);
1885
1886 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1887 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1888 cmd->valid_flags |=
1889 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1890
1891 cmd->seid = cpu_to_le16(seid);
1892 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1893
1894 return status;
1895 }
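
/* Illustrative sketch, not part of the driver: enabling unicast promiscuous
 * mode on a VSI.  rx_only_promisc = true asks firmware with AQ API >= 1.5 not
 * to mirror the VSI's own egress traffic back to it; older firmware simply
 * never sees that flag.  The helper name is an assumption for the example.
 */
static int i40e_example_enable_uc_promisc(struct i40e_hw *hw, u16 vsi_seid)
{
	return i40e_aq_set_vsi_unicast_promiscuous(hw, vsi_seid, true,
						   NULL, true);
}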
1896
1897 /**
1898 * i40e_aq_set_vsi_multicast_promiscuous
1899 * @hw: pointer to the hw struct
1900 * @seid: vsi number
1901 * @set: set multicast promiscuous enable/disable
1902 * @cmd_details: pointer to command details structure or NULL
1903 **/
1904 int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
1905 u16 seid, bool set,
1906 struct i40e_asq_cmd_details *cmd_details)
1907 {
1908 struct i40e_aq_desc desc;
1909 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1910 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1911 u16 flags = 0;
1912 int status;
1913
1914 i40e_fill_default_direct_cmd_desc(&desc,
1915 i40e_aqc_opc_set_vsi_promiscuous_modes);
1916
1917 if (set)
1918 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
1919
1920 cmd->promiscuous_flags = cpu_to_le16(flags);
1921
1922 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
1923
1924 cmd->seid = cpu_to_le16(seid);
1925 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1926
1927 return status;
1928 }
1929
1930 /**
1931 * i40e_aq_set_vsi_mc_promisc_on_vlan
1932 * @hw: pointer to the hw struct
1933 * @seid: vsi number
1934 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
1935 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
1936 * @cmd_details: pointer to command details structure or NULL
1937 **/
1938 int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
1939 u16 seid, bool enable,
1940 u16 vid,
1941 struct i40e_asq_cmd_details *cmd_details)
1942 {
1943 struct i40e_aq_desc desc;
1944 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1945 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1946 u16 flags = 0;
1947 int status;
1948
1949 i40e_fill_default_direct_cmd_desc(&desc,
1950 i40e_aqc_opc_set_vsi_promiscuous_modes);
1951
1952 if (enable)
1953 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
1954
1955 cmd->promiscuous_flags = cpu_to_le16(flags);
1956 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
1957 cmd->seid = cpu_to_le16(seid);
1958 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
1959
1960 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
1961 cmd_details, true);
1962
1963 return status;
1964 }
1965
1966 /**
1967 * i40e_aq_set_vsi_uc_promisc_on_vlan
1968 * @hw: pointer to the hw struct
1969 * @seid: vsi number
1970 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
1971 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
1972 * @cmd_details: pointer to command details structure or NULL
1973 **/
1974 int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
1975 u16 seid, bool enable,
1976 u16 vid,
1977 struct i40e_asq_cmd_details *cmd_details)
1978 {
1979 struct i40e_aq_desc desc;
1980 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1981 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1982 u16 flags = 0;
1983 int status;
1984
1985 i40e_fill_default_direct_cmd_desc(&desc,
1986 i40e_aqc_opc_set_vsi_promiscuous_modes);
1987
1988 if (enable) {
1989 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1990 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1991 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1992 }
1993
1994 cmd->promiscuous_flags = cpu_to_le16(flags);
1995 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1996 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1997 cmd->valid_flags |=
1998 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1999 cmd->seid = cpu_to_le16(seid);
2000 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2001
2002 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
2003 cmd_details, true);
2004
2005 return status;
2006 }
2007
2008 /**
2009 * i40e_aq_set_vsi_bc_promisc_on_vlan
2010 * @hw: pointer to the hw struct
2011 * @seid: vsi number
2012 * @enable: set broadcast promiscuous enable/disable for a given VLAN
2013 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
2014 * @cmd_details: pointer to command details structure or NULL
2015 **/
2016 int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
2017 u16 seid, bool enable, u16 vid,
2018 struct i40e_asq_cmd_details *cmd_details)
2019 {
2020 struct i40e_aq_desc desc;
2021 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2022 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2023 u16 flags = 0;
2024 int status;
2025
2026 i40e_fill_default_direct_cmd_desc(&desc,
2027 i40e_aqc_opc_set_vsi_promiscuous_modes);
2028
2029 if (enable)
2030 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2031
2032 cmd->promiscuous_flags = cpu_to_le16(flags);
2033 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2034 cmd->seid = cpu_to_le16(seid);
2035 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2036
2037 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2038
2039 return status;
2040 }
2041
2042 /**
2043 * i40e_aq_set_vsi_broadcast
2044 * @hw: pointer to the hw struct
2045 * @seid: vsi number
2046 * @set_filter: true to set filter, false to clear filter
2047 * @cmd_details: pointer to command details structure or NULL
2048 *
2049 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2050 **/
2051 int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2052 u16 seid, bool set_filter,
2053 struct i40e_asq_cmd_details *cmd_details)
2054 {
2055 struct i40e_aq_desc desc;
2056 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2057 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2058 int status;
2059
2060 i40e_fill_default_direct_cmd_desc(&desc,
2061 i40e_aqc_opc_set_vsi_promiscuous_modes);
2062
2063 if (set_filter)
2064 cmd->promiscuous_flags
2065 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2066 else
2067 cmd->promiscuous_flags
2068 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2069
2070 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2071 cmd->seid = cpu_to_le16(seid);
2072 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2073
2074 return status;
2075 }
2076
2077 /**
2078 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2079 * @hw: pointer to the hw struct
2080 * @seid: vsi number
2081 * @enable: set VLAN promiscuous enable/disable
2082 * @cmd_details: pointer to command details structure or NULL
2083 **/
2084 int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2085 u16 seid, bool enable,
2086 struct i40e_asq_cmd_details *cmd_details)
2087 {
2088 struct i40e_aq_desc desc;
2089 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2090 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2091 u16 flags = 0;
2092 int status;
2093
2094 i40e_fill_default_direct_cmd_desc(&desc,
2095 i40e_aqc_opc_set_vsi_promiscuous_modes);
2096 if (enable)
2097 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2098
2099 cmd->promiscuous_flags = cpu_to_le16(flags);
2100 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2101 cmd->seid = cpu_to_le16(seid);
2102
2103 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2104
2105 return status;
2106 }
2107
2108 /**
2109 * i40e_aq_get_vsi_params - get VSI configuration info
2110 * @hw: pointer to the hw struct
2111 * @vsi_ctx: pointer to a vsi context struct
2112 * @cmd_details: pointer to command details structure or NULL
2113 **/
2114 int i40e_aq_get_vsi_params(struct i40e_hw *hw,
2115 struct i40e_vsi_context *vsi_ctx,
2116 struct i40e_asq_cmd_details *cmd_details)
2117 {
2118 struct i40e_aq_desc desc;
2119 struct i40e_aqc_add_get_update_vsi *cmd =
2120 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2121 struct i40e_aqc_add_get_update_vsi_completion *resp =
2122 (struct i40e_aqc_add_get_update_vsi_completion *)
2123 &desc.params.raw;
2124 int status;
2125
2126 i40e_fill_default_direct_cmd_desc(&desc,
2127 i40e_aqc_opc_get_vsi_parameters);
2128
2129 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2130
2131 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2132
2133 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2134 sizeof(vsi_ctx->info), NULL);
2135
2136 if (status)
2137 goto aq_get_vsi_params_exit;
2138
2139 vsi_ctx->seid = le16_to_cpu(resp->seid);
2140 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2141 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2142 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2143
2144 aq_get_vsi_params_exit:
2145 return status;
2146 }
2147
2148 /**
2149 * i40e_aq_update_vsi_params
2150 * @hw: pointer to the hw struct
2151 * @vsi_ctx: pointer to a vsi context struct
2152 * @cmd_details: pointer to command details structure or NULL
2153 *
2154 * Update a VSI context.
2155 **/
2156 int i40e_aq_update_vsi_params(struct i40e_hw *hw,
2157 struct i40e_vsi_context *vsi_ctx,
2158 struct i40e_asq_cmd_details *cmd_details)
2159 {
2160 struct i40e_aq_desc desc;
2161 struct i40e_aqc_add_get_update_vsi *cmd =
2162 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2163 struct i40e_aqc_add_get_update_vsi_completion *resp =
2164 (struct i40e_aqc_add_get_update_vsi_completion *)
2165 &desc.params.raw;
2166 int status;
2167
2168 i40e_fill_default_direct_cmd_desc(&desc,
2169 i40e_aqc_opc_update_vsi_parameters);
2170 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2171
2172 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2173
2174 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
2175 sizeof(vsi_ctx->info),
2176 cmd_details, true);
2177
2178 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2179 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2180
2181 return status;
2182 }
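
/* Illustrative sketch, not part of the driver: the read-modify-write pattern
 * built from i40e_aq_get_vsi_params() and i40e_aq_update_vsi_params().  Which
 * fields of vsi_ctx.info are changed, and the valid_sections bits that must
 * accompany them, depend on the feature being configured and are left as a
 * comment here.
 */
static int i40e_example_touch_vsi(struct i40e_hw *hw, u16 vsi_seid)
{
	struct i40e_vsi_context vsi_ctx = {};
	int status;

	vsi_ctx.seid = vsi_seid;
	status = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
	if (status)
		return status;

	/* ... modify vsi_ctx.info and set the matching valid_sections ... */

	return i40e_aq_update_vsi_params(hw, &vsi_ctx, NULL);
}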
2183
2184 /**
2185 * i40e_aq_get_switch_config
2186 * @hw: pointer to the hardware structure
2187 * @buf: pointer to the result buffer
2188 * @buf_size: length of input buffer
2189 * @start_seid: SEID to start the report from, 0 == beginning; updated on return
2190 * @cmd_details: pointer to command details structure or NULL
2191 *
2192 * Fill the buf with switch configuration returned from AdminQ command
2193 **/
2194 int i40e_aq_get_switch_config(struct i40e_hw *hw,
2195 struct i40e_aqc_get_switch_config_resp *buf,
2196 u16 buf_size, u16 *start_seid,
2197 struct i40e_asq_cmd_details *cmd_details)
2198 {
2199 struct i40e_aq_desc desc;
2200 struct i40e_aqc_switch_seid *scfg =
2201 (struct i40e_aqc_switch_seid *)&desc.params.raw;
2202 int status;
2203
2204 i40e_fill_default_direct_cmd_desc(&desc,
2205 i40e_aqc_opc_get_switch_config);
2206 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2207 if (buf_size > I40E_AQ_LARGE_BUF)
2208 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2209 scfg->seid = cpu_to_le16(*start_seid);
2210
2211 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2212 *start_seid = le16_to_cpu(scfg->seid);
2213
2214 return status;
2215 }
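
/* Illustrative sketch, not part of the driver: walking the complete switch
 * configuration.  The firmware returns as many elements as fit in the buffer
 * and updates *start_seid so the caller can continue; a returned start SEID
 * of 0 means the report is complete.  The I40E_AQ_LARGE_BUF buffer size is an
 * arbitrary choice, and kzalloc() is assumed to be available through i40e.h.
 */
static int i40e_example_walk_switch_config(struct i40e_hw *hw)
{
	struct i40e_aqc_get_switch_config_resp *buf;
	u16 next_seid = 0;
	int status;

	buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	do {
		status = i40e_aq_get_switch_config(hw, buf, I40E_AQ_LARGE_BUF,
						   &next_seid, NULL);
		if (status)
			break;
		/* ... inspect buf->header and buf->element[] here ... */
	} while (next_seid != 0);

	kfree(buf);
	return status;
}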
2216
2217 /**
2218 * i40e_aq_set_switch_config
2219 * @hw: pointer to the hardware structure
2220 * @flags: bit flag values to set
2222 * @valid_flags: which bit flags to set
2223 * @mode: cloud filter mode
2224 * @cmd_details: pointer to command details structure or NULL
2225 *
2226 * Set switch configuration bits
2227 **/
2228 int i40e_aq_set_switch_config(struct i40e_hw *hw,
2229 u16 flags,
2230 u16 valid_flags, u8 mode,
2231 struct i40e_asq_cmd_details *cmd_details)
2232 {
2233 struct i40e_aq_desc desc;
2234 struct i40e_aqc_set_switch_config *scfg =
2235 (struct i40e_aqc_set_switch_config *)&desc.params.raw;
2236 int status;
2237
2238 i40e_fill_default_direct_cmd_desc(&desc,
2239 i40e_aqc_opc_set_switch_config);
2240 scfg->flags = cpu_to_le16(flags);
2241 scfg->valid_flags = cpu_to_le16(valid_flags);
2242 scfg->mode = mode;
2243 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2244 scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2245 scfg->first_tag = cpu_to_le16(hw->first_tag);
2246 scfg->second_tag = cpu_to_le16(hw->second_tag);
2247 }
2248 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2249
2250 return status;
2251 }
2252
2253 /**
2254 * i40e_aq_get_firmware_version
2255 * @hw: pointer to the hw struct
2256 * @fw_major_version: firmware major version
2257 * @fw_minor_version: firmware minor version
2258 * @fw_build: firmware build number
2259 * @api_major_version: admin queue API major version
2260 * @api_minor_version: admin queue API minor version
2261 * @cmd_details: pointer to command details structure or NULL
2262 *
2263 * Get the firmware version from the admin queue commands
2264 **/
2265 int i40e_aq_get_firmware_version(struct i40e_hw *hw,
2266 u16 *fw_major_version, u16 *fw_minor_version,
2267 u32 *fw_build,
2268 u16 *api_major_version, u16 *api_minor_version,
2269 struct i40e_asq_cmd_details *cmd_details)
2270 {
2271 struct i40e_aq_desc desc;
2272 struct i40e_aqc_get_version *resp =
2273 (struct i40e_aqc_get_version *)&desc.params.raw;
2274 int status;
2275
2276 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2277
2278 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2279
2280 if (!status) {
2281 if (fw_major_version)
2282 *fw_major_version = le16_to_cpu(resp->fw_major);
2283 if (fw_minor_version)
2284 *fw_minor_version = le16_to_cpu(resp->fw_minor);
2285 if (fw_build)
2286 *fw_build = le32_to_cpu(resp->fw_build);
2287 if (api_major_version)
2288 *api_major_version = le16_to_cpu(resp->api_major);
2289 if (api_minor_version)
2290 *api_minor_version = le16_to_cpu(resp->api_minor);
2291 }
2292
2293 return status;
2294 }
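
/* Illustrative sketch, not part of the driver: querying and logging the
 * firmware and AdminQ API versions.  Any output pointer that the caller does
 * not need may be passed as NULL instead.
 */
static void i40e_example_log_fw_version(struct i40e_hw *hw)
{
	u16 fw_maj, fw_min, api_maj, api_min;
	u32 fw_build;

	if (i40e_aq_get_firmware_version(hw, &fw_maj, &fw_min, &fw_build,
					 &api_maj, &api_min, NULL))
		return;

	hw_dbg(hw, "fw %d.%d build %u, api %d.%d\n",
	       fw_maj, fw_min, fw_build, api_maj, api_min);
}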
2295
2296 /**
2297 * i40e_aq_send_driver_version
2298 * @hw: pointer to the hw struct
2299 * @dv: driver version (major, minor, build, subbuild and driver string)
2300 * @cmd_details: pointer to command details structure or NULL
2301 *
2302 * Send the driver version to the firmware
2303 **/
2304 int i40e_aq_send_driver_version(struct i40e_hw *hw,
2305 struct i40e_driver_version *dv,
2306 struct i40e_asq_cmd_details *cmd_details)
2307 {
2308 struct i40e_aq_desc desc;
2309 struct i40e_aqc_driver_version *cmd =
2310 (struct i40e_aqc_driver_version *)&desc.params.raw;
2311 int status;
2312 u16 len;
2313
2314 if (dv == NULL)
2315 return -EINVAL;
2316
2317 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2318
2319 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2320 cmd->driver_major_ver = dv->major_version;
2321 cmd->driver_minor_ver = dv->minor_version;
2322 cmd->driver_build_ver = dv->build_version;
2323 cmd->driver_subbuild_ver = dv->subbuild_version;
2324
2325 len = 0;
2326 while (len < sizeof(dv->driver_string) &&
2327 (dv->driver_string[len] < 0x80) &&
2328 dv->driver_string[len])
2329 len++;
2330 status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2331 len, cmd_details);
2332
2333 return status;
2334 }
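
/* Illustrative sketch, not part of the driver: reporting a driver version to
 * the firmware.  The version numbers and the string are placeholders; the
 * real driver derives them from its own version defines.
 */
static int i40e_example_report_driver_version(struct i40e_hw *hw)
{
	struct i40e_driver_version dv = {};

	dv.major_version = 2;
	dv.minor_version = 0;
	dv.build_version = 0;
	dv.subbuild_version = 0;
	strscpy(dv.driver_string, "i40e-example", sizeof(dv.driver_string));

	return i40e_aq_send_driver_version(hw, &dv, NULL);
}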
2335
2336 /**
2337 * i40e_get_link_status - get status of the HW network link
2338 * @hw: pointer to the hw struct
2339 * @link_up: pointer to bool (true/false = linkup/linkdown)
2340 *
2341 * Sets *link_up to true if the link is up, false if it is down.
2342 * The value of *link_up is not valid if the returned status is non-zero.
2343 *
2344 * Side effect: LinkStatusEvent reporting becomes enabled
2345 **/
2346 int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2347 {
2348 int status = 0;
2349
2350 if (hw->phy.get_link_info) {
2351 status = i40e_update_link_info(hw);
2352
2353 if (status)
2354 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2355 status);
2356 }
2357
2358 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2359
2360 return status;
2361 }
2362
2363 /**
2364 * i40e_update_link_info - update status of the HW network link
2365 * @hw: pointer to the hw struct
2366 **/
2367 noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)
2368 {
2369 struct i40e_aq_get_phy_abilities_resp abilities;
2370 int status = 0;
2371
2372 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2373 if (status)
2374 return status;
2375
2376 /* extra checking needed to ensure link info to user is timely */
2377 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2378 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2379 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2380 status = i40e_aq_get_phy_capabilities(hw, false, false,
2381 &abilities, NULL);
2382 if (status)
2383 return status;
2384
2385 if (abilities.fec_cfg_curr_mod_ext_info &
2386 I40E_AQ_ENABLE_FEC_AUTO)
2387 hw->phy.link_info.req_fec_info =
2388 (I40E_AQ_REQUEST_FEC_KR |
2389 I40E_AQ_REQUEST_FEC_RS);
2390 else
2391 hw->phy.link_info.req_fec_info =
2392 abilities.fec_cfg_curr_mod_ext_info &
2393 (I40E_AQ_REQUEST_FEC_KR |
2394 I40E_AQ_REQUEST_FEC_RS);
2395
2396 memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2397 sizeof(hw->phy.link_info.module_type));
2398 }
2399
2400 return status;
2401 }
2402
2403 /**
2404 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2405 * @hw: pointer to the hw struct
2406 * @uplink_seid: the MAC or other gizmo SEID
2407 * @downlink_seid: the VSI SEID
2408 * @enabled_tc: bitmap of TCs to be enabled
2409 * @default_port: true for default port VSI, false for control port
2410 * @veb_seid: pointer to where to put the resulting VEB SEID
2411 * @enable_stats: true to turn on VEB stats
2412 * @cmd_details: pointer to command details structure or NULL
2413 *
2414 * This asks the FW to add a VEB between the uplink and downlink
2415 * elements. If the uplink SEID is 0, this will be a floating VEB.
2416 **/
2417 int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2418 u16 downlink_seid, u8 enabled_tc,
2419 bool default_port, u16 *veb_seid,
2420 bool enable_stats,
2421 struct i40e_asq_cmd_details *cmd_details)
2422 {
2423 struct i40e_aq_desc desc;
2424 struct i40e_aqc_add_veb *cmd =
2425 (struct i40e_aqc_add_veb *)&desc.params.raw;
2426 struct i40e_aqc_add_veb_completion *resp =
2427 (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2428 u16 veb_flags = 0;
2429 int status;
2430
2431 /* SEIDs need to either both be set or both be 0 for floating VEB */
2432 if (!!uplink_seid != !!downlink_seid)
2433 return -EINVAL;
2434
2435 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2436
2437 cmd->uplink_seid = cpu_to_le16(uplink_seid);
2438 cmd->downlink_seid = cpu_to_le16(downlink_seid);
2439 cmd->enable_tcs = enabled_tc;
2440 if (!uplink_seid)
2441 veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2442 if (default_port)
2443 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2444 else
2445 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2446
2447 /* reverse logic here: set the bitflag to disable the stats */
2448 if (!enable_stats)
2449 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2450
2451 cmd->veb_flags = cpu_to_le16(veb_flags);
2452
2453 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2454
2455 if (!status && veb_seid)
2456 *veb_seid = le16_to_cpu(resp->veb_seid);
2457
2458 return status;
2459 }
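
/* Illustrative sketch, not part of the driver: creating a VEB for traffic
 * class 0 between a MAC uplink and a VSI downlink, with statistics enabled.
 * The TC bitmap and the default-port choice are placeholders.
 */
static int i40e_example_add_veb(struct i40e_hw *hw, u16 uplink_seid,
				u16 vsi_seid, u16 *veb_seid)
{
	/* enabled_tc = BIT(0): TC0 only; default port and stats enabled */
	return i40e_aq_add_veb(hw, uplink_seid, vsi_seid, BIT(0), true,
			       veb_seid, true, NULL);
}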
2460
2461 /**
2462 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2463 * @hw: pointer to the hw struct
2464 * @veb_seid: the SEID of the VEB to query
2465 * @switch_id: the uplink switch id
2466 * @floating: set to true if the VEB is floating
2467 * @statistic_index: index of the stats counter block for this VEB
2468 * @vebs_used: number of VEBs used by the function
2469 * @vebs_free: total VEBs not reserved by any function
2470 * @cmd_details: pointer to command details structure or NULL
2471 *
2472 * This retrieves the parameters for a particular VEB, specified by
2473 * uplink_seid, and returns them to the caller.
2474 **/
2475 int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2476 u16 veb_seid, u16 *switch_id,
2477 bool *floating, u16 *statistic_index,
2478 u16 *vebs_used, u16 *vebs_free,
2479 struct i40e_asq_cmd_details *cmd_details)
2480 {
2481 struct i40e_aq_desc desc;
2482 struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2483 (struct i40e_aqc_get_veb_parameters_completion *)
2484 &desc.params.raw;
2485 int status;
2486
2487 if (veb_seid == 0)
2488 return -EINVAL;
2489
2490 i40e_fill_default_direct_cmd_desc(&desc,
2491 i40e_aqc_opc_get_veb_parameters);
2492 cmd_resp->seid = cpu_to_le16(veb_seid);
2493
2494 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2495 if (status)
2496 goto get_veb_exit;
2497
2498 if (switch_id)
2499 *switch_id = le16_to_cpu(cmd_resp->switch_id);
2500 if (statistic_index)
2501 *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2502 if (vebs_used)
2503 *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2504 if (vebs_free)
2505 *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2506 if (floating) {
2507 u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2508
2509 if (flags & I40E_AQC_ADD_VEB_FLOATING)
2510 *floating = true;
2511 else
2512 *floating = false;
2513 }
2514
2515 get_veb_exit:
2516 return status;
2517 }
2518
2519 /**
2520 * i40e_prepare_add_macvlan
2521 * @mv_list: list of macvlans to be added
2522 * @desc: pointer to AQ descriptor structure
2523 * @count: length of the list
2524 * @seid: VSI for the mac address
2525 *
2526 * Internal helper function that prepares the add macvlan request
2527 * and returns the buffer size.
2528 **/
2529 static u16
2530 i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
2531 struct i40e_aq_desc *desc, u16 count, u16 seid)
2532 {
2533 struct i40e_aqc_macvlan *cmd =
2534 (struct i40e_aqc_macvlan *)&desc->params.raw;
2535 u16 buf_size;
2536 int i;
2537
2538 buf_size = count * sizeof(*mv_list);
2539
2540 /* prep the rest of the request */
2541 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
2542 cmd->num_addresses = cpu_to_le16(count);
2543 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2544 cmd->seid[1] = 0;
2545 cmd->seid[2] = 0;
2546
2547 for (i = 0; i < count; i++)
2548 if (is_multicast_ether_addr(mv_list[i].mac_addr))
2549 mv_list[i].flags |=
2550 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2551
2552 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2553 if (buf_size > I40E_AQ_LARGE_BUF)
2554 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2555
2556 return buf_size;
2557 }
2558
2559 /**
2560 * i40e_aq_add_macvlan
2561 * @hw: pointer to the hw struct
2562 * @seid: VSI for the mac address
2563 * @mv_list: list of macvlans to be added
2564 * @count: length of the list
2565 * @cmd_details: pointer to command details structure or NULL
2566 *
2567 * Add MAC/VLAN addresses to the HW filtering
2568 **/
2569 int
2570 i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2571 struct i40e_aqc_add_macvlan_element_data *mv_list,
2572 u16 count, struct i40e_asq_cmd_details *cmd_details)
2573 {
2574 struct i40e_aq_desc desc;
2575 u16 buf_size;
2576
2577 if (count == 0 || !mv_list || !hw)
2578 return -EINVAL;
2579
2580 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2581
2582 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2583 cmd_details, true);
2584 }
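
/* Illustrative sketch, not part of the driver: adding a single MAC/VLAN
 * filter.  The I40E_AQC_MACVLAN_ADD_PERFECT_MATCH flag is taken from the
 * AdminQ command definitions; the real driver batches filters into larger
 * lists before calling i40e_aq_add_macvlan().
 */
static int i40e_example_add_mac_filter(struct i40e_hw *hw, u16 vsi_seid,
				       const u8 *mac, u16 vlan)
{
	struct i40e_aqc_add_macvlan_element_data elem = {};

	ether_addr_copy(elem.mac_addr, mac);
	elem.vlan_tag = cpu_to_le16(vlan);
	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);

	return i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
}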
2585
2586 /**
2587 * i40e_aq_add_macvlan_v2
2588 * @hw: pointer to the hw struct
2589 * @seid: VSI for the mac address
2590 * @mv_list: list of macvlans to be added
2591 * @count: length of the list
2592 * @cmd_details: pointer to command details structure or NULL
2593 * @aq_status: pointer to Admin Queue status return value
2594 *
2595 * Add MAC/VLAN addresses to the HW filtering.
2596 * The _v2 version returns the last Admin Queue status in aq_status
2597 * to avoid race conditions in access to hw->aq.asq_last_status.
2598 * It also calls _v2 versions of asq_send_command functions to
2599 * get the aq_status on the stack.
2600 **/
2601 int
2602 i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
2603 struct i40e_aqc_add_macvlan_element_data *mv_list,
2604 u16 count, struct i40e_asq_cmd_details *cmd_details,
2605 enum i40e_admin_queue_err *aq_status)
2606 {
2607 struct i40e_aq_desc desc;
2608 u16 buf_size;
2609
2610 if (count == 0 || !mv_list || !hw)
2611 return -EINVAL;
2612
2613 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2614
2615 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2616 cmd_details, true, aq_status);
2617 }
2618
2619 /**
2620 * i40e_aq_remove_macvlan
2621 * @hw: pointer to the hw struct
2622 * @seid: VSI for the mac address
2623 * @mv_list: list of macvlans to be removed
2624 * @count: length of the list
2625 * @cmd_details: pointer to command details structure or NULL
2626 *
2627 * Remove MAC/VLAN addresses from the HW filtering
2628 **/
2629 int
2630 i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2631 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2632 u16 count, struct i40e_asq_cmd_details *cmd_details)
2633 {
2634 struct i40e_aq_desc desc;
2635 struct i40e_aqc_macvlan *cmd =
2636 (struct i40e_aqc_macvlan *)&desc.params.raw;
2637 u16 buf_size;
2638 int status;
2639
2640 if (count == 0 || !mv_list || !hw)
2641 return -EINVAL;
2642
2643 buf_size = count * sizeof(*mv_list);
2644
2645 /* prep the rest of the request */
2646 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2647 cmd->num_addresses = cpu_to_le16(count);
2648 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2649 cmd->seid[1] = 0;
2650 cmd->seid[2] = 0;
2651
2652 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2653 if (buf_size > I40E_AQ_LARGE_BUF)
2654 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2655
2656 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2657 cmd_details, true);
2658
2659 return status;
2660 }
2661
2662 /**
2663 * i40e_aq_remove_macvlan_v2
2664 * @hw: pointer to the hw struct
2665 * @seid: VSI for the mac address
2666 * @mv_list: list of macvlans to be removed
2667 * @count: length of the list
2668 * @cmd_details: pointer to command details structure or NULL
2669 * @aq_status: pointer to Admin Queue status return value
2670 *
2671 * Remove MAC/VLAN addresses from the HW filtering.
2672 * The _v2 version returns the last Admin Queue status in aq_status
2673 * to avoid race conditions in access to hw->aq.asq_last_status.
2674 * It also calls _v2 versions of asq_send_command functions to
2675 * get the aq_status on the stack.
2676 **/
2677 int
2678 i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
2679 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2680 u16 count, struct i40e_asq_cmd_details *cmd_details,
2681 enum i40e_admin_queue_err *aq_status)
2682 {
2683 struct i40e_aqc_macvlan *cmd;
2684 struct i40e_aq_desc desc;
2685 u16 buf_size;
2686
2687 if (count == 0 || !mv_list || !hw)
2688 return -EINVAL;
2689
2690 buf_size = count * sizeof(*mv_list);
2691
2692 /* prep the rest of the request */
2693 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2694 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
2695 cmd->num_addresses = cpu_to_le16(count);
2696 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2697 cmd->seid[1] = 0;
2698 cmd->seid[2] = 0;
2699
2700 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2701 if (buf_size > I40E_AQ_LARGE_BUF)
2702 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2703
2704 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2705 cmd_details, true, aq_status);
2706 }
2707
2708 /**
2709 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2710 * @hw: pointer to the hw struct
2711 * @opcode: AQ opcode for add or delete mirror rule
2712 * @sw_seid: Switch SEID (to which rule refers)
2713 * @rule_type: Rule Type (ingress/egress/VLAN)
2714 * @id: Destination VSI SEID or Rule ID
2715 * @count: length of the list
2716 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2717 * @cmd_details: pointer to command details structure or NULL
2718 * @rule_id: Rule ID returned from FW
2719 * @rules_used: Number of rules used in internal switch
2720 * @rules_free: Number of rules free in internal switch
2721 *
2722 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2723 * VEBs/VEPA elements only
2724 **/
2725 static int i40e_mirrorrule_op(struct i40e_hw *hw,
2726 u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2727 u16 count, __le16 *mr_list,
2728 struct i40e_asq_cmd_details *cmd_details,
2729 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2730 {
2731 struct i40e_aq_desc desc;
2732 struct i40e_aqc_add_delete_mirror_rule *cmd =
2733 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2734 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2735 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2736 u16 buf_size;
2737 int status;
2738
2739 buf_size = count * sizeof(*mr_list);
2740
2741 /* prep the rest of the request */
2742 i40e_fill_default_direct_cmd_desc(&desc, opcode);
2743 cmd->seid = cpu_to_le16(sw_seid);
2744 cmd->rule_type = cpu_to_le16(rule_type &
2745 I40E_AQC_MIRROR_RULE_TYPE_MASK);
2746 cmd->num_entries = cpu_to_le16(count);
2747 /* Dest VSI for add, rule_id for delete */
2748 cmd->destination = cpu_to_le16(id);
2749 if (mr_list) {
2750 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2751 I40E_AQ_FLAG_RD));
2752 if (buf_size > I40E_AQ_LARGE_BUF)
2753 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2754 }
2755
2756 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2757 cmd_details);
2758 if (!status ||
2759 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2760 if (rule_id)
2761 *rule_id = le16_to_cpu(resp->rule_id);
2762 if (rules_used)
2763 *rules_used = le16_to_cpu(resp->mirror_rules_used);
2764 if (rules_free)
2765 *rules_free = le16_to_cpu(resp->mirror_rules_free);
2766 }
2767 return status;
2768 }
2769
2770 /**
2771 * i40e_aq_add_mirrorrule - add a mirror rule
2772 * @hw: pointer to the hw struct
2773 * @sw_seid: Switch SEID (to which rule refers)
2774 * @rule_type: Rule Type (ingress/egress/VLAN)
2775 * @dest_vsi: SEID of VSI to which packets will be mirrored
2776 * @count: length of the list
2777 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2778 * @cmd_details: pointer to command details structure or NULL
2779 * @rule_id: Rule ID returned from FW
2780 * @rules_used: Number of rules used in internal switch
2781 * @rules_free: Number of rules free in internal switch
2782 *
2783 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2784 **/
2785 int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2786 u16 rule_type, u16 dest_vsi, u16 count,
2787 __le16 *mr_list,
2788 struct i40e_asq_cmd_details *cmd_details,
2789 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2790 {
2791 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2792 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2793 if (count == 0 || !mr_list)
2794 return -EINVAL;
2795 }
2796
2797 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2798 rule_type, dest_vsi, count, mr_list,
2799 cmd_details, rule_id, rules_used, rules_free);
2800 }
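
/* Illustrative sketch, not part of the driver: mirroring all ingress traffic
 * seen by a switch element to one destination VSI.  For the ALL_INGRESS and
 * ALL_EGRESS rule types no VSI/VLAN list is needed, so mr_list is NULL and
 * count is 0; the returned rule_id is what a later delete call would use.
 */
static int i40e_example_mirror_all_ingress(struct i40e_hw *hw, u16 sw_seid,
					   u16 dest_vsi_seid, u16 *rule_id)
{
	return i40e_aq_add_mirrorrule(hw, sw_seid,
				      I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
				      dest_vsi_seid, 0, NULL, NULL,
				      rule_id, NULL, NULL);
}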
2801
2802 /**
2803 * i40e_aq_delete_mirrorrule - delete a mirror rule
2804 * @hw: pointer to the hw struct
2805 * @sw_seid: Switch SEID (to which rule refers)
2806 * @rule_type: Rule Type (ingress/egress/VLAN)
2807 * @count: length of the list
2808 * @rule_id: Rule ID that is returned in the receive desc as part of
2809 * add_mirrorrule.
2810 * @mr_list: list of mirrored VLAN IDs to be removed
2811 * @cmd_details: pointer to command details structure or NULL
2812 * @rules_used: Number of rules used in internal switch
2813 * @rules_free: Number of rules free in internal switch
2814 *
2815 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2816 **/
2817 int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2818 u16 rule_type, u16 rule_id, u16 count,
2819 __le16 *mr_list,
2820 struct i40e_asq_cmd_details *cmd_details,
2821 u16 *rules_used, u16 *rules_free)
2822 {
2823 /* Rule ID has to be valid except when rule_type is INGRESS VLAN mirroring */
2824 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
2825 /* count and mr_list shall be valid for rule_type INGRESS VLAN
2826 * mirroring. For other rule_type values, count and mr_list do
2827 * not matter.
2828 */
2829 if (count == 0 || !mr_list)
2830 return -EINVAL;
2831 }
2832
2833 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2834 rule_type, rule_id, count, mr_list,
2835 cmd_details, NULL, rules_used, rules_free);
2836 }
2837
2838 /**
2839 * i40e_aq_send_msg_to_vf
2840 * @hw: pointer to the hardware structure
2841 * @vfid: VF id to send msg
2842 * @v_opcode: opcodes for VF-PF communication
2843 * @v_retval: return error code
2844 * @msg: pointer to the msg buffer
2845 * @msglen: msg length
2846 * @cmd_details: pointer to command details
2847 *
2848 * Send a message to the specified VF.
2849 **/
2850 int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2851 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2852 struct i40e_asq_cmd_details *cmd_details)
2853 {
2854 struct i40e_aq_desc desc;
2855 struct i40e_aqc_pf_vf_message *cmd =
2856 (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2857 int status;
2858
2859 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
2860 cmd->id = cpu_to_le32(vfid);
2861 desc.cookie_high = cpu_to_le32(v_opcode);
2862 desc.cookie_low = cpu_to_le32(v_retval);
2863 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
2864 if (msglen) {
2865 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2866 I40E_AQ_FLAG_RD));
2867 if (msglen > I40E_AQ_LARGE_BUF)
2868 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2869 desc.datalen = cpu_to_le16(msglen);
2870 }
2871 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
2872
2873 return status;
2874 }
2875
2876 /**
2877 * i40e_aq_debug_read_register
2878 * @hw: pointer to the hw struct
2879 * @reg_addr: register address
2880 * @reg_val: register value
2881 * @cmd_details: pointer to command details structure or NULL
2882 *
2883 * Read the register using the admin queue commands
2884 **/
2885 int i40e_aq_debug_read_register(struct i40e_hw *hw,
2886 u32 reg_addr, u64 *reg_val,
2887 struct i40e_asq_cmd_details *cmd_details)
2888 {
2889 struct i40e_aq_desc desc;
2890 struct i40e_aqc_debug_reg_read_write *cmd_resp =
2891 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2892 int status;
2893
2894 if (reg_val == NULL)
2895 return -EINVAL;
2896
2897 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
2898
2899 cmd_resp->address = cpu_to_le32(reg_addr);
2900
2901 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2902
2903 if (!status) {
2904 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
2905 (u64)le32_to_cpu(cmd_resp->value_low);
2906 }
2907
2908 return status;
2909 }
2910
2911 /**
2912 * i40e_aq_debug_write_register
2913 * @hw: pointer to the hw struct
2914 * @reg_addr: register address
2915 * @reg_val: register value
2916 * @cmd_details: pointer to command details structure or NULL
2917 *
2918 * Write to a register using the admin queue commands
2919 **/
2920 int i40e_aq_debug_write_register(struct i40e_hw *hw,
2921 u32 reg_addr, u64 reg_val,
2922 struct i40e_asq_cmd_details *cmd_details)
2923 {
2924 struct i40e_aq_desc desc;
2925 struct i40e_aqc_debug_reg_read_write *cmd =
2926 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2927 int status;
2928
2929 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
2930
2931 cmd->address = cpu_to_le32(reg_addr);
2932 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
2933 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
2934
2935 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2936
2937 return status;
2938 }
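
/* Illustrative sketch, not part of the driver: a read-modify-write of a CSR
 * through the debug AdminQ commands, useful when the absolute register
 * offset (rather than a port-relative one) has to be accessed.  The register
 * address and the bits to set are placeholders supplied by the caller.
 */
static int i40e_example_rmw_register(struct i40e_hw *hw, u32 reg_addr,
				     u64 set_bits)
{
	u64 val;
	int status;

	status = i40e_aq_debug_read_register(hw, reg_addr, &val, NULL);
	if (status)
		return status;

	return i40e_aq_debug_write_register(hw, reg_addr, val | set_bits,
					    NULL);
}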
2939
2940 /**
2941 * i40e_aq_request_resource
2942 * @hw: pointer to the hw struct
2943 * @resource: resource id
2944 * @access: access type
2945 * @sdp_number: resource number
2946 * @timeout: the maximum time in ms that the driver may hold the resource
2947 * @cmd_details: pointer to command details structure or NULL
2948 *
2949 * Request a common resource using the admin queue commands.
2950 **/
2951 int i40e_aq_request_resource(struct i40e_hw *hw,
2952 enum i40e_aq_resources_ids resource,
2953 enum i40e_aq_resource_access_type access,
2954 u8 sdp_number, u64 *timeout,
2955 struct i40e_asq_cmd_details *cmd_details)
2956 {
2957 struct i40e_aq_desc desc;
2958 struct i40e_aqc_request_resource *cmd_resp =
2959 (struct i40e_aqc_request_resource *)&desc.params.raw;
2960 int status;
2961
2962 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
2963
2964 cmd_resp->resource_id = cpu_to_le16(resource);
2965 cmd_resp->access_type = cpu_to_le16(access);
2966 cmd_resp->resource_number = cpu_to_le32(sdp_number);
2967
2968 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2969 /* The completion specifies the maximum time in ms that the driver
2970 * may hold the resource in the Timeout field.
2971 * If the resource is held by someone else, the command completes with
2972 * a busy return value and the timeout field indicates the maximum time
2973 * the current owner of the resource has to free it.
2974 */
2975 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
2976 *timeout = le32_to_cpu(cmd_resp->timeout);
2977
2978 return status;
2979 }
2980
2981 /**
2982 * i40e_aq_release_resource
2983 * @hw: pointer to the hw struct
2984 * @resource: resource id
2985 * @sdp_number: resource number
2986 * @cmd_details: pointer to command details structure or NULL
2987 *
2988 * Release a common resource using the admin queue commands.
2989 **/
2990 int i40e_aq_release_resource(struct i40e_hw *hw,
2991 enum i40e_aq_resources_ids resource,
2992 u8 sdp_number,
2993 struct i40e_asq_cmd_details *cmd_details)
2994 {
2995 struct i40e_aq_desc desc;
2996 struct i40e_aqc_request_resource *cmd =
2997 (struct i40e_aqc_request_resource *)&desc.params.raw;
2998 int status;
2999
3000 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
3001
3002 cmd->resource_id = cpu_to_le16(resource);
3003 cmd->resource_number = cpu_to_le32(sdp_number);
3004
3005 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3006
3007 return status;
3008 }
3009
3010 /**
3011 * i40e_aq_read_nvm
3012 * @hw: pointer to the hw struct
3013 * @module_pointer: module pointer location in words from the NVM beginning
3014 * @offset: byte offset from the module beginning
3015 * @length: length of the section to be read (in bytes from the offset)
3016 * @data: command buffer (size [bytes] = length)
3017 * @last_command: tells if this is the last command in a series
3018 * @cmd_details: pointer to command details structure or NULL
3019 *
3020 * Read the NVM using the admin queue commands
3021 **/
3022 int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
3023 u32 offset, u16 length, void *data,
3024 bool last_command,
3025 struct i40e_asq_cmd_details *cmd_details)
3026 {
3027 struct i40e_aq_desc desc;
3028 struct i40e_aqc_nvm_update *cmd =
3029 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3030 int status;
3031
3032 /* The highest byte of the offset must be zero. */
3033 if (offset & 0xFF000000) {
3034 status = -EINVAL;
3035 goto i40e_aq_read_nvm_exit;
3036 }
3037
3038 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3039
3040 /* If this is the last command in a series, set the proper flag. */
3041 if (last_command)
3042 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3043 cmd->module_pointer = module_pointer;
3044 cmd->offset = cpu_to_le32(offset);
3045 cmd->length = cpu_to_le16(length);
3046
3047 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3048 if (length > I40E_AQ_LARGE_BUF)
3049 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3050
3051 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3052
3053 i40e_aq_read_nvm_exit:
3054 return status;
3055 }
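
/* Illustrative sketch, not part of the driver: reading one block from the
 * NVM.  The NVM resource is requested for read access first and always
 * released afterwards; I40E_NVM_RESOURCE_ID and I40E_RESOURCE_READ come from
 * the shared type definitions.  module_pointer 0 and a single (last_command)
 * read are assumptions made for the example.
 */
static int i40e_example_read_nvm_block(struct i40e_hw *hw, u32 offset,
				       u16 length, void *data)
{
	u64 timeout = 0;
	int status;

	status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
					  I40E_RESOURCE_READ, 0, &timeout,
					  NULL);
	if (status)
		return status;

	status = i40e_aq_read_nvm(hw, 0, offset, length, data, true, NULL);

	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);

	return status;
}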
3056
3057 /**
3058 * i40e_aq_erase_nvm
3059 * @hw: pointer to the hw struct
3060 * @module_pointer: module pointer location in words from the NVM beginning
3061 * @offset: offset in the module (expressed in 4 KB from module's beginning)
3062 * @length: length of the section to be erased (expressed in 4 KB)
3063 * @last_command: tells if this is the last command in a series
3064 * @cmd_details: pointer to command details structure or NULL
3065 *
3066 * Erase the NVM sector using the admin queue commands
3067 **/
3068 int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3069 u32 offset, u16 length, bool last_command,
3070 struct i40e_asq_cmd_details *cmd_details)
3071 {
3072 struct i40e_aq_desc desc;
3073 struct i40e_aqc_nvm_update *cmd =
3074 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3075 int status;
3076
3077 /* The highest byte of the offset must be zero. */
3078 if (offset & 0xFF000000) {
3079 status = -EINVAL;
3080 goto i40e_aq_erase_nvm_exit;
3081 }
3082
3083 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3084
3085 /* If this is the last command in a series, set the proper flag. */
3086 if (last_command)
3087 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3088 cmd->module_pointer = module_pointer;
3089 cmd->offset = cpu_to_le32(offset);
3090 cmd->length = cpu_to_le16(length);
3091
3092 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3093
3094 i40e_aq_erase_nvm_exit:
3095 return status;
3096 }
3097
3098 /**
3099 * i40e_parse_discover_capabilities
3100 * @hw: pointer to the hw struct
3101 * @buff: pointer to a buffer containing device/function capability records
3102 * @cap_count: number of capability records in the list
3103 * @list_type_opc: type of capabilities list to parse
3104 *
3105 * Parse the device/function capabilities list.
3106 **/
3107 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3108 u32 cap_count,
3109 enum i40e_admin_queue_opc list_type_opc)
3110 {
3111 struct i40e_aqc_list_capabilities_element_resp *cap;
3112 u32 valid_functions, num_functions;
3113 u32 number, logical_id, phys_id;
3114 struct i40e_hw_capabilities *p;
3115 u16 id, ocp_cfg_word0;
3116 u8 major_rev;
3117 int status;
3118 u32 i = 0;
3119
3120 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3121
3122 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3123 p = &hw->dev_caps;
3124 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3125 p = &hw->func_caps;
3126 else
3127 return;
3128
3129 for (i = 0; i < cap_count; i++, cap++) {
3130 id = le16_to_cpu(cap->id);
3131 number = le32_to_cpu(cap->number);
3132 logical_id = le32_to_cpu(cap->logical_id);
3133 phys_id = le32_to_cpu(cap->phys_id);
3134 major_rev = cap->major_rev;
3135
3136 switch (id) {
3137 case I40E_AQ_CAP_ID_SWITCH_MODE:
3138 p->switch_mode = number;
3139 break;
3140 case I40E_AQ_CAP_ID_MNG_MODE:
3141 p->management_mode = number;
3142 if (major_rev > 1) {
3143 p->mng_protocols_over_mctp = logical_id;
3144 i40e_debug(hw, I40E_DEBUG_INIT,
3145 "HW Capability: Protocols over MCTP = %d\n",
3146 p->mng_protocols_over_mctp);
3147 } else {
3148 p->mng_protocols_over_mctp = 0;
3149 }
3150 break;
3151 case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3152 p->npar_enable = number;
3153 break;
3154 case I40E_AQ_CAP_ID_OS2BMC_CAP:
3155 p->os2bmc = number;
3156 break;
3157 case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3158 p->valid_functions = number;
3159 break;
3160 case I40E_AQ_CAP_ID_SRIOV:
3161 if (number == 1)
3162 p->sr_iov_1_1 = true;
3163 break;
3164 case I40E_AQ_CAP_ID_VF:
3165 p->num_vfs = number;
3166 p->vf_base_id = logical_id;
3167 break;
3168 case I40E_AQ_CAP_ID_VMDQ:
3169 if (number == 1)
3170 p->vmdq = true;
3171 break;
3172 case I40E_AQ_CAP_ID_8021QBG:
3173 if (number == 1)
3174 p->evb_802_1_qbg = true;
3175 break;
3176 case I40E_AQ_CAP_ID_8021QBR:
3177 if (number == 1)
3178 p->evb_802_1_qbh = true;
3179 break;
3180 case I40E_AQ_CAP_ID_VSI:
3181 p->num_vsis = number;
3182 break;
3183 case I40E_AQ_CAP_ID_DCB:
3184 if (number == 1) {
3185 p->dcb = true;
3186 p->enabled_tcmap = logical_id;
3187 p->maxtc = phys_id;
3188 }
3189 break;
3190 case I40E_AQ_CAP_ID_FCOE:
3191 if (number == 1)
3192 p->fcoe = true;
3193 break;
3194 case I40E_AQ_CAP_ID_ISCSI:
3195 if (number == 1)
3196 p->iscsi = true;
3197 break;
3198 case I40E_AQ_CAP_ID_RSS:
3199 p->rss = true;
3200 p->rss_table_size = number;
3201 p->rss_table_entry_width = logical_id;
3202 break;
3203 case I40E_AQ_CAP_ID_RXQ:
3204 p->num_rx_qp = number;
3205 p->base_queue = phys_id;
3206 break;
3207 case I40E_AQ_CAP_ID_TXQ:
3208 p->num_tx_qp = number;
3209 p->base_queue = phys_id;
3210 break;
3211 case I40E_AQ_CAP_ID_MSIX:
3212 p->num_msix_vectors = number;
3213 i40e_debug(hw, I40E_DEBUG_INIT,
3214 "HW Capability: MSIX vector count = %d\n",
3215 p->num_msix_vectors);
3216 break;
3217 case I40E_AQ_CAP_ID_VF_MSIX:
3218 p->num_msix_vectors_vf = number;
3219 break;
3220 case I40E_AQ_CAP_ID_FLEX10:
3221 if (major_rev == 1) {
3222 if (number == 1) {
3223 p->flex10_enable = true;
3224 p->flex10_capable = true;
3225 }
3226 } else {
3227 /* Capability revision >= 2 */
3228 if (number & 1)
3229 p->flex10_enable = true;
3230 if (number & 2)
3231 p->flex10_capable = true;
3232 }
3233 p->flex10_mode = logical_id;
3234 p->flex10_status = phys_id;
3235 break;
3236 case I40E_AQ_CAP_ID_CEM:
3237 if (number == 1)
3238 p->mgmt_cem = true;
3239 break;
3240 case I40E_AQ_CAP_ID_IWARP:
3241 if (number == 1)
3242 p->iwarp = true;
3243 break;
3244 case I40E_AQ_CAP_ID_LED:
3245 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3246 p->led[phys_id] = true;
3247 break;
3248 case I40E_AQ_CAP_ID_SDP:
3249 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3250 p->sdp[phys_id] = true;
3251 break;
3252 case I40E_AQ_CAP_ID_MDIO:
3253 if (number == 1) {
3254 p->mdio_port_num = phys_id;
3255 p->mdio_port_mode = logical_id;
3256 }
3257 break;
3258 case I40E_AQ_CAP_ID_1588:
3259 if (number == 1)
3260 p->ieee_1588 = true;
3261 break;
3262 case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3263 p->fd = true;
3264 p->fd_filters_guaranteed = number;
3265 p->fd_filters_best_effort = logical_id;
3266 break;
3267 case I40E_AQ_CAP_ID_WSR_PROT:
3268 p->wr_csr_prot = (u64)number;
3269 p->wr_csr_prot |= (u64)logical_id << 32;
3270 break;
3271 case I40E_AQ_CAP_ID_NVM_MGMT:
3272 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3273 p->sec_rev_disabled = true;
3274 if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3275 p->update_disabled = true;
3276 break;
3277 default:
3278 break;
3279 }
3280 }
3281
3282 if (p->fcoe)
3283 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3284
3285 	/* Software override ensuring FCoE stays disabled in NPAR or
3286 	 * Flex10 (MFP) mode, because it is not supported in those modes.
3287 	 */
3288 if (p->npar_enable || p->flex10_enable)
3289 p->fcoe = false;
3290
3291 /* count the enabled ports (aka the "not disabled" ports) */
3292 hw->num_ports = 0;
3293 for (i = 0; i < 4; i++) {
3294 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3295 u64 port_cfg = 0;
3296
3297 /* use AQ read to get the physical register offset instead
3298 * of the port relative offset
3299 */
3300 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3301 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3302 hw->num_ports++;
3303 }
3304
3305 	/* OCP cards case: if a mezz is removed, the Ethernet port is left in a
3306 	 * disabled state in the PRTGEN_CNF register. An additional NVM read is
3307 	 * needed to check whether we are dealing with an OCP card.
3308 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting
3309 * physical ports results in wrong partition id calculation and thus
3310 * not supporting WoL.
3311 */
3312 if (hw->mac.type == I40E_MAC_X722) {
3313 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3314 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3315 2 * I40E_SR_OCP_CFG_WORD0,
3316 sizeof(ocp_cfg_word0),
3317 &ocp_cfg_word0, true, NULL);
3318 if (!status &&
3319 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3320 hw->num_ports = 4;
3321 i40e_release_nvm(hw);
3322 }
3323 }
3324
3325 valid_functions = p->valid_functions;
3326 num_functions = 0;
3327 while (valid_functions) {
3328 if (valid_functions & 1)
3329 num_functions++;
3330 valid_functions >>= 1;
3331 }
3332
3333 /* partition id is 1-based, and functions are evenly spread
3334 * across the ports as partitions
3335 */
3336 if (hw->num_ports != 0) {
3337 hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3338 hw->num_partitions = num_functions / hw->num_ports;
3339 }
3340
3341 /* additional HW specific goodies that might
3342 * someday be HW version specific
3343 */
3344 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3345 }
3346
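/* Worked example (illustrative only): the partition bookkeeping above with
 * concrete numbers.  With 8 valid functions spread over 4 enabled ports,
 * each port carries 8 / 4 = 2 partitions, and PF 5 lands in partition
 * (5 / 4) + 1 = 2.  hweight32() is the kernel popcount helper and could
 * stand in for the open-coded loop over valid_functions; the helper name
 * below is an assumption for the example.
 */
static void __maybe_unused example_partition_math(struct i40e_hw *hw)
{
	u32 num_functions = hweight32(hw->dev_caps.valid_functions);

	if (hw->num_ports) {
		hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
		hw->num_partitions = num_functions / hw->num_ports;
	}
}
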
3347 /**
3348 * i40e_aq_discover_capabilities
3349 * @hw: pointer to the hw struct
3350 * @buff: a virtual buffer to hold the capabilities
3351 * @buff_size: Size of the virtual buffer
3352 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3353 * @list_type_opc: capabilities type to discover - pass in the command opcode
3354 * @cmd_details: pointer to command details structure or NULL
3355 *
3356 * Get the device capabilities descriptions from the firmware
3357 **/
3358 int i40e_aq_discover_capabilities(struct i40e_hw *hw,
3359 void *buff, u16 buff_size, u16 *data_size,
3360 enum i40e_admin_queue_opc list_type_opc,
3361 struct i40e_asq_cmd_details *cmd_details)
3362 {
3363 struct i40e_aqc_list_capabilites *cmd;
3364 struct i40e_aq_desc desc;
3365 int status = 0;
3366
3367 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3368
3369 if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3370 list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3371 status = -EINVAL;
3372 goto exit;
3373 }
3374
3375 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3376
3377 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3378 if (buff_size > I40E_AQ_LARGE_BUF)
3379 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3380
3381 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3382 *data_size = le16_to_cpu(desc.datalen);
3383
3384 if (status)
3385 goto exit;
3386
3387 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3388 list_type_opc);
3389
3390 exit:
3391 return status;
3392 }
3393
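/* Usage sketch (illustrative only): the common calling pattern for
 * i40e_aq_discover_capabilities().  If the supplied buffer is too small the
 * firmware reports I40E_AQ_RC_ENOMEM and returns the needed size in
 * data_size, so the caller reallocates and retries.  This mirrors, in
 * simplified form, what the PF init path does; the helper name and the
 * initial 40-record guess are assumptions made for the example.
 */
static int __maybe_unused example_discover_dev_caps(struct i40e_hw *hw)
{
	u16 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	u16 data_size;
	void *buf;
	int err;

	do {
		buf = kzalloc(buf_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		err = i40e_aq_discover_capabilities(hw, buf, buf_len,
						    &data_size,
						    i40e_aqc_opc_list_dev_capabilities,
						    NULL);
		kfree(buf);

		if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM)
			buf_len = data_size;	/* retry with a larger buffer */
		else
			break;
	} while (true);

	return err;
}
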
3394 /**
3395 * i40e_aq_update_nvm
3396 * @hw: pointer to the hw struct
3397 * @module_pointer: module pointer location in words from the NVM beginning
3398 * @offset: byte offset from the module beginning
3399 * @length: length of the section to be written (in bytes from the offset)
3400 * @data: command buffer (size [bytes] = length)
3401 * @last_command: tells if this is the last command in a series
3402 * @preservation_flags: Preservation mode flags
3403 * @cmd_details: pointer to command details structure or NULL
3404 *
3405 * Update the NVM using the admin queue commands
3406 **/
3407 int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3408 u32 offset, u16 length, void *data,
3409 bool last_command, u8 preservation_flags,
3410 struct i40e_asq_cmd_details *cmd_details)
3411 {
3412 struct i40e_aq_desc desc;
3413 struct i40e_aqc_nvm_update *cmd =
3414 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3415 int status;
3416
3417 /* In offset the highest byte must be zeroed. */
3418 if (offset & 0xFF000000) {
3419 status = -EINVAL;
3420 goto i40e_aq_update_nvm_exit;
3421 }
3422
3423 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3424
3425 /* If this is the last command in a series, set the proper flag. */
3426 if (last_command)
3427 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3428 if (hw->mac.type == I40E_MAC_X722) {
3429 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3430 cmd->command_flags |=
3431 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3432 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3433 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3434 cmd->command_flags |=
3435 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3436 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3437 }
3438 cmd->module_pointer = module_pointer;
3439 cmd->offset = cpu_to_le32(offset);
3440 cmd->length = cpu_to_le16(length);
3441
3442 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3443 if (length > I40E_AQ_LARGE_BUF)
3444 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3445
3446 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3447
3448 i40e_aq_update_nvm_exit:
3449 return status;
3450 }
3451
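/* Usage sketch (illustrative only): writing a buffer to the NVM in 4 KB
 * chunks and setting last_command only on the final piece so firmware knows
 * when the series ends.  The helper name, the zero module pointer and the
 * 4 KB chunk size are assumptions for the example; real callers also have to
 * hold the NVM resource and respect sector alignment.
 */
static int __maybe_unused example_write_nvm(struct i40e_hw *hw, u32 offset,
					    u8 *data, u32 len)
{
	int status = 0;
	u32 done = 0;

	while (done < len) {
		u16 chunk = min_t(u32, len - done, 4096);
		bool last = (done + chunk == len);

		status = i40e_aq_update_nvm(hw, 0x0, offset + done, chunk,
					    data + done, last,
					    I40E_NVM_PRESERVATION_FLAGS_ALL,
					    NULL);
		if (status)
			break;
		done += chunk;
	}

	return status;
}
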
3452 /**
3453 * i40e_aq_rearrange_nvm
3454 * @hw: pointer to the hw struct
3455 * @rearrange_nvm: defines direction of rearrangement
3456 * @cmd_details: pointer to command details structure or NULL
3457 *
3458 * Rearrange NVM structure, available only for transition FW
3459 **/
3460 int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3461 u8 rearrange_nvm,
3462 struct i40e_asq_cmd_details *cmd_details)
3463 {
3464 struct i40e_aqc_nvm_update *cmd;
3465 struct i40e_aq_desc desc;
3466 int status;
3467
3468 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3469
3470 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3471
3472 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3473 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3474
3475 if (!rearrange_nvm) {
3476 status = -EINVAL;
3477 goto i40e_aq_rearrange_nvm_exit;
3478 }
3479
3480 cmd->command_flags |= rearrange_nvm;
3481 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3482
3483 i40e_aq_rearrange_nvm_exit:
3484 return status;
3485 }
3486
3487 /**
3488 * i40e_aq_get_lldp_mib
3489 * @hw: pointer to the hw struct
3490 * @bridge_type: type of bridge requested
3491 * @mib_type: Local, Remote or both Local and Remote MIBs
3492 * @buff: pointer to a user supplied buffer to store the MIB block
3493 * @buff_size: size of the buffer (in bytes)
3494 * @local_len: length of the returned Local LLDP MIB
3495 * @remote_len: length of the returned Remote LLDP MIB
3496 * @cmd_details: pointer to command details structure or NULL
3497 *
3498 * Requests the complete LLDP MIB (entire packet).
3499 **/
3500 int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3501 u8 mib_type, void *buff, u16 buff_size,
3502 u16 *local_len, u16 *remote_len,
3503 struct i40e_asq_cmd_details *cmd_details)
3504 {
3505 struct i40e_aq_desc desc;
3506 struct i40e_aqc_lldp_get_mib *cmd =
3507 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3508 struct i40e_aqc_lldp_get_mib *resp =
3509 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3510 int status;
3511
3512 if (buff_size == 0 || !buff)
3513 return -EINVAL;
3514
3515 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3516 /* Indirect Command */
3517 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3518
3519 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3520 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
3521 I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
3522
3523 desc.datalen = cpu_to_le16(buff_size);
3524
3525 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3526 if (buff_size > I40E_AQ_LARGE_BUF)
3527 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3528
3529 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3530 if (!status) {
3531 if (local_len != NULL)
3532 *local_len = le16_to_cpu(resp->local_len);
3533 if (remote_len != NULL)
3534 *remote_len = le16_to_cpu(resp->remote_len);
3535 }
3536
3537 return status;
3538 }
3539
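/* Usage sketch (illustrative only): fetching the local LLDP MIB from the
 * nearest bridge into a caller-supplied buffer.  I40E_AQ_LLDP_MIB_LOCAL and
 * I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE are the existing admin queue
 * definitions used by the driver's DCB code; the helper name and the 512-byte
 * buffer size are assumptions for the example.
 */
static int __maybe_unused example_get_local_lldp_mib(struct i40e_hw *hw)
{
	u16 local_len = 0;
	u8 *mib;
	int status;

	mib = kzalloc(512, GFP_KERNEL);
	if (!mib)
		return -ENOMEM;

	status = i40e_aq_get_lldp_mib(hw,
				      I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				      I40E_AQ_LLDP_MIB_LOCAL,
				      mib, 512, &local_len, NULL, NULL);
	if (!status)
		hw_dbg(hw, "local LLDP MIB is %u bytes\n", local_len);

	kfree(mib);
	return status;
}
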
3540 /**
3541 * i40e_aq_set_lldp_mib - Set the LLDP MIB
3542 * @hw: pointer to the hw struct
3543 * @mib_type: Local, Remote or both Local and Remote MIBs
3544 * @buff: pointer to a user supplied buffer to store the MIB block
3545 * @buff_size: size of the buffer (in bytes)
3546 * @cmd_details: pointer to command details structure or NULL
3547 *
3548 * Set the LLDP MIB.
3549 **/
3550 int
3551 i40e_aq_set_lldp_mib(struct i40e_hw *hw,
3552 u8 mib_type, void *buff, u16 buff_size,
3553 struct i40e_asq_cmd_details *cmd_details)
3554 {
3555 struct i40e_aqc_lldp_set_local_mib *cmd;
3556 struct i40e_aq_desc desc;
3557 int status;
3558
3559 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
3560 if (buff_size == 0 || !buff)
3561 return -EINVAL;
3562
3563 i40e_fill_default_direct_cmd_desc(&desc,
3564 i40e_aqc_opc_lldp_set_local_mib);
3565 /* Indirect Command */
3566 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3567 if (buff_size > I40E_AQ_LARGE_BUF)
3568 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3569 desc.datalen = cpu_to_le16(buff_size);
3570
3571 cmd->type = mib_type;
3572 cmd->length = cpu_to_le16(buff_size);
3573 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff));
3574 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff));
3575
3576 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3577 return status;
3578 }
3579
3580 /**
3581 * i40e_aq_cfg_lldp_mib_change_event
3582 * @hw: pointer to the hw struct
3583 * @enable_update: Enable or Disable event posting
3584 * @cmd_details: pointer to command details structure or NULL
3585 *
3586 * Enable or Disable posting of an event on ARQ when LLDP MIB
3587 * associated with the interface changes
3588 **/
3589 int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3590 bool enable_update,
3591 struct i40e_asq_cmd_details *cmd_details)
3592 {
3593 struct i40e_aq_desc desc;
3594 struct i40e_aqc_lldp_update_mib *cmd =
3595 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3596 int status;
3597
3598 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3599
3600 if (!enable_update)
3601 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3602
3603 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3604
3605 return status;
3606 }
3607
3608 /**
3609 * i40e_aq_restore_lldp
3610 * @hw: pointer to the hw struct
3611 * @setting: pointer to factory setting variable or NULL
3612 * @restore: True if factory settings should be restored
3613 * @cmd_details: pointer to command details structure or NULL
3614 *
3615 * Restore LLDP Agent factory settings if @restore is set to True. Otherwise,
3616 * only return the factory setting in the AQ response.
3617 **/
3618 int
3619 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3620 struct i40e_asq_cmd_details *cmd_details)
3621 {
3622 struct i40e_aq_desc desc;
3623 struct i40e_aqc_lldp_restore *cmd =
3624 (struct i40e_aqc_lldp_restore *)&desc.params.raw;
3625 int status;
3626
3627 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
3628 i40e_debug(hw, I40E_DEBUG_ALL,
3629 "Restore LLDP not supported by current FW version.\n");
3630 return -ENODEV;
3631 }
3632
3633 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3634
3635 if (restore)
3636 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3637
3638 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3639
3640 if (setting)
3641 *setting = cmd->command & 1;
3642
3643 return status;
3644 }
3645
3646 /**
3647 * i40e_aq_stop_lldp
3648 * @hw: pointer to the hw struct
3649 * @shutdown_agent: True if LLDP Agent needs to be Shutdown
3650 * @persist: True if stop of LLDP should be persistent across power cycles
3651 * @cmd_details: pointer to command details structure or NULL
3652 *
3653 * Stop or Shutdown the embedded LLDP Agent
3654 **/
3655 int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3656 bool persist,
3657 struct i40e_asq_cmd_details *cmd_details)
3658 {
3659 struct i40e_aq_desc desc;
3660 struct i40e_aqc_lldp_stop *cmd =
3661 (struct i40e_aqc_lldp_stop *)&desc.params.raw;
3662 int status;
3663
3664 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3665
3666 if (shutdown_agent)
3667 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3668
3669 if (persist) {
3670 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3671 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3672 else
3673 i40e_debug(hw, I40E_DEBUG_ALL,
3674 "Persistent Stop LLDP not supported by current FW version.\n");
3675 }
3676
3677 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3678
3679 return status;
3680 }
3681
3682 /**
3683 * i40e_aq_start_lldp
3684 * @hw: pointer to the hw struct
3685 * @persist: True if start of LLDP should be persistent across power cycles
3686 * @cmd_details: pointer to command details structure or NULL
3687 *
3688 * Start the embedded LLDP Agent on all ports.
3689 **/
3690 int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3691 struct i40e_asq_cmd_details *cmd_details)
3692 {
3693 struct i40e_aq_desc desc;
3694 struct i40e_aqc_lldp_start *cmd =
3695 (struct i40e_aqc_lldp_start *)&desc.params.raw;
3696 int status;
3697
3698 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3699
3700 cmd->command = I40E_AQ_LLDP_AGENT_START;
3701
3702 if (persist) {
3703 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3704 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3705 else
3706 i40e_debug(hw, I40E_DEBUG_ALL,
3707 "Persistent Start LLDP not supported by current FW version.\n");
3708 }
3709
3710 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3711
3712 return status;
3713 }
3714
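/* Usage sketch (illustrative only): stopping the firmware LLDP agent and
 * starting it again, requesting persistence across power cycles when the
 * firmware advertises I40E_HW_FLAG_FW_LLDP_PERSISTENT.  This mirrors the
 * driver's ethtool private-flag handling in simplified form; the helper
 * name is an assumption for the example.
 */
static int __maybe_unused example_restart_lldp_agent(struct i40e_hw *hw)
{
	bool persist = !!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT);
	int status;

	/* stop (but do not shut down) the agent, then start it again */
	status = i40e_aq_stop_lldp(hw, false, persist, NULL);
	if (status)
		return status;

	return i40e_aq_start_lldp(hw, persist, NULL);
}
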
3715 /**
3716 * i40e_aq_set_dcb_parameters
3717 * @hw: pointer to the hw struct
3718 * @dcb_enable: True if DCB configuration needs to be applied
3719 * @cmd_details: pointer to command details structure or NULL
3720 *
3721 **/
3722 int
3723 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3724 struct i40e_asq_cmd_details *cmd_details)
3725 {
3726 struct i40e_aq_desc desc;
3727 struct i40e_aqc_set_dcb_parameters *cmd =
3728 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3729 int status;
3730
3731 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
3732 return -ENODEV;
3733
3734 i40e_fill_default_direct_cmd_desc(&desc,
3735 i40e_aqc_opc_set_dcb_parameters);
3736
3737 if (dcb_enable) {
3738 cmd->valid_flags = I40E_DCB_VALID;
3739 cmd->command = I40E_AQ_DCB_SET_AGENT;
3740 }
3741 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3742
3743 return status;
3744 }
3745
3746 /**
3747 * i40e_aq_get_cee_dcb_config
3748 * @hw: pointer to the hw struct
3749 * @buff: response buffer that stores CEE operational configuration
3750 * @buff_size: size of the buffer passed
3751 * @cmd_details: pointer to command details structure or NULL
3752 *
3753 * Get CEE DCBX mode operational configuration from firmware
3754 **/
3755 int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3756 void *buff, u16 buff_size,
3757 struct i40e_asq_cmd_details *cmd_details)
3758 {
3759 struct i40e_aq_desc desc;
3760 int status;
3761
3762 if (buff_size == 0 || !buff)
3763 return -EINVAL;
3764
3765 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3766
3767 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3768 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3769 cmd_details);
3770
3771 return status;
3772 }
3773
3774 /**
3775 * i40e_aq_add_udp_tunnel
3776 * @hw: pointer to the hw struct
3777 * @udp_port: the UDP port to add in Host byte order
3778 * @protocol_index: protocol index type
3779 * @filter_index: pointer to filter index
3780 * @cmd_details: pointer to command details structure or NULL
3781 *
3782 * Note: Firmware expects the udp_port value to be in Little Endian format,
3783 * and this function will call cpu_to_le16 to convert from Host byte order to
3784 * Little Endian order.
3785 **/
3786 int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3787 u16 udp_port, u8 protocol_index,
3788 u8 *filter_index,
3789 struct i40e_asq_cmd_details *cmd_details)
3790 {
3791 struct i40e_aq_desc desc;
3792 struct i40e_aqc_add_udp_tunnel *cmd =
3793 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3794 struct i40e_aqc_del_udp_tunnel_completion *resp =
3795 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3796 int status;
3797
3798 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3799
3800 cmd->udp_port = cpu_to_le16(udp_port);
3801 cmd->protocol_type = protocol_index;
3802
3803 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3804
3805 if (!status && filter_index)
3806 *filter_index = resp->index;
3807
3808 return status;
3809 }
3810
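/* Usage sketch (illustrative only): offloading the standard VXLAN UDP port.
 * The port is passed in host byte order, as the kernel-doc above spells out,
 * and the returned filter index is what a later i40e_aq_del_udp_tunnel()
 * call would take.  I40E_AQC_TUNNEL_TYPE_VXLAN is the existing protocol
 * index definition; the helper name is an assumption for the example.
 */
static int __maybe_unused example_add_vxlan_port(struct i40e_hw *hw,
						 u8 *filter_index)
{
	return i40e_aq_add_udp_tunnel(hw, 4789 /* IANA VXLAN port */,
				      I40E_AQC_TUNNEL_TYPE_VXLAN,
				      filter_index, NULL);
}
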
3811 /**
3812 * i40e_aq_del_udp_tunnel
3813 * @hw: pointer to the hw struct
3814 * @index: filter index
3815 * @cmd_details: pointer to command details structure or NULL
3816 **/
3817 int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3818 struct i40e_asq_cmd_details *cmd_details)
3819 {
3820 struct i40e_aq_desc desc;
3821 struct i40e_aqc_remove_udp_tunnel *cmd =
3822 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3823 int status;
3824
3825 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3826
3827 cmd->index = index;
3828
3829 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3830
3831 return status;
3832 }
3833
3834 /**
3835 * i40e_aq_delete_element - Delete switch element
3836 * @hw: pointer to the hw struct
3837 * @seid: the SEID to delete from the switch
3838 * @cmd_details: pointer to command details structure or NULL
3839 *
3840 * This deletes a switch element from the switch.
3841 **/
3842 int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3843 struct i40e_asq_cmd_details *cmd_details)
3844 {
3845 struct i40e_aq_desc desc;
3846 struct i40e_aqc_switch_seid *cmd =
3847 (struct i40e_aqc_switch_seid *)&desc.params.raw;
3848 int status;
3849
3850 if (seid == 0)
3851 return -EINVAL;
3852
3853 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3854
3855 cmd->seid = cpu_to_le16(seid);
3856
3857 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
3858 cmd_details, true);
3859
3860 return status;
3861 }
3862
3863 /**
3864 * i40e_aq_dcb_updated - DCB Updated Command
3865 * @hw: pointer to the hw struct
3866 * @cmd_details: pointer to command details structure or NULL
3867 *
3868 * EMP will return when the shared RPB settings have been
3869 * recomputed and modified. The retval field in the descriptor
3870 * will be set to 0 when RPB is modified.
3871 **/
3872 int i40e_aq_dcb_updated(struct i40e_hw *hw,
3873 struct i40e_asq_cmd_details *cmd_details)
3874 {
3875 struct i40e_aq_desc desc;
3876 int status;
3877
3878 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
3879
3880 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3881
3882 return status;
3883 }
3884
3885 /**
3886 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
3887 * @hw: pointer to the hw struct
3888 * @seid: seid for the physical port/switching component/vsi
3889 * @buff: Indirect buffer to hold data parameters and response
3890 * @buff_size: Indirect buffer size
3891 * @opcode: Tx scheduler AQ command opcode
3892 * @cmd_details: pointer to command details structure or NULL
3893 *
3894 * Generic command handler for Tx scheduler AQ commands
3895 **/
3896 static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
3897 void *buff, u16 buff_size,
3898 enum i40e_admin_queue_opc opcode,
3899 struct i40e_asq_cmd_details *cmd_details)
3900 {
3901 struct i40e_aq_desc desc;
3902 struct i40e_aqc_tx_sched_ind *cmd =
3903 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
3904 int status;
3905 bool cmd_param_flag = false;
3906
3907 switch (opcode) {
3908 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
3909 case i40e_aqc_opc_configure_vsi_tc_bw:
3910 case i40e_aqc_opc_enable_switching_comp_ets:
3911 case i40e_aqc_opc_modify_switching_comp_ets:
3912 case i40e_aqc_opc_disable_switching_comp_ets:
3913 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
3914 case i40e_aqc_opc_configure_switching_comp_bw_config:
3915 cmd_param_flag = true;
3916 break;
3917 case i40e_aqc_opc_query_vsi_bw_config:
3918 case i40e_aqc_opc_query_vsi_ets_sla_config:
3919 case i40e_aqc_opc_query_switching_comp_ets_config:
3920 case i40e_aqc_opc_query_port_ets_config:
3921 case i40e_aqc_opc_query_switching_comp_bw_config:
3922 cmd_param_flag = false;
3923 break;
3924 default:
3925 return -EINVAL;
3926 }
3927
3928 i40e_fill_default_direct_cmd_desc(&desc, opcode);
3929
3930 /* Indirect command */
3931 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3932 if (cmd_param_flag)
3933 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
3934 if (buff_size > I40E_AQ_LARGE_BUF)
3935 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3936
3937 desc.datalen = cpu_to_le16(buff_size);
3938
3939 cmd->vsi_seid = cpu_to_le16(seid);
3940
3941 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3942
3943 return status;
3944 }
3945
3946 /**
3947 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
3948 * @hw: pointer to the hw struct
3949 * @seid: VSI seid
3950 * @credit: BW limit credits (0 = disabled)
3951 * @max_credit: Max BW limit credits
3952 * @cmd_details: pointer to command details structure or NULL
3953 **/
3954 int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
3955 u16 seid, u16 credit, u8 max_credit,
3956 struct i40e_asq_cmd_details *cmd_details)
3957 {
3958 struct i40e_aq_desc desc;
3959 struct i40e_aqc_configure_vsi_bw_limit *cmd =
3960 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
3961 int status;
3962
3963 i40e_fill_default_direct_cmd_desc(&desc,
3964 i40e_aqc_opc_configure_vsi_bw_limit);
3965
3966 cmd->vsi_seid = cpu_to_le16(seid);
3967 cmd->credit = cpu_to_le16(credit);
3968 cmd->max_credit = max_credit;
3969
3970 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3971
3972 return status;
3973 }
3974
3975 /**
3976 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
3977 * @hw: pointer to the hw struct
3978 * @seid: VSI seid
3979 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
3980 * @cmd_details: pointer to command details structure or NULL
3981 **/
3982 int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
3983 u16 seid,
3984 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
3985 struct i40e_asq_cmd_details *cmd_details)
3986 {
3987 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3988 i40e_aqc_opc_configure_vsi_tc_bw,
3989 cmd_details);
3990 }
3991
3992 /**
3993 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
3994 * @hw: pointer to the hw struct
3995 * @seid: seid of the switching component connected to Physical Port
3996 * @ets_data: Buffer holding ETS parameters
3997 * @opcode: Tx scheduler AQ command opcode
3998 * @cmd_details: pointer to command details structure or NULL
3999 **/
4000 int
4001 i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
4002 u16 seid,
4003 struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
4004 enum i40e_admin_queue_opc opcode,
4005 struct i40e_asq_cmd_details *cmd_details)
4006 {
4007 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
4008 sizeof(*ets_data), opcode, cmd_details);
4009 }
4010
4011 /**
4012 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
4013 * @hw: pointer to the hw struct
4014 * @seid: seid of the switching component
4015 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
4016 * @cmd_details: pointer to command details structure or NULL
4017 **/
4018 int
4019 i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
4020 u16 seid,
4021 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
4022 struct i40e_asq_cmd_details *cmd_details)
4023 {
4024 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4025 i40e_aqc_opc_configure_switching_comp_bw_config,
4026 cmd_details);
4027 }
4028
4029 /**
4030 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
4031 * @hw: pointer to the hw struct
4032 * @seid: seid of the VSI
4033 * @bw_data: Buffer to hold VSI BW configuration
4034 * @cmd_details: pointer to command details structure or NULL
4035 **/
4036 int
4037 i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
4038 u16 seid,
4039 struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
4040 struct i40e_asq_cmd_details *cmd_details)
4041 {
4042 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4043 i40e_aqc_opc_query_vsi_bw_config,
4044 cmd_details);
4045 }
4046
4047 /**
4048 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4049 * @hw: pointer to the hw struct
4050 * @seid: seid of the VSI
4051 * @bw_data: Buffer to hold VSI BW configuration per TC
4052 * @cmd_details: pointer to command details structure or NULL
4053 **/
4054 int
4055 i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4056 u16 seid,
4057 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4058 struct i40e_asq_cmd_details *cmd_details)
4059 {
4060 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4061 i40e_aqc_opc_query_vsi_ets_sla_config,
4062 cmd_details);
4063 }
4064
4065 /**
4066 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4067 * @hw: pointer to the hw struct
4068 * @seid: seid of the switching component
4069 * @bw_data: Buffer to hold switching component's per TC BW config
4070 * @cmd_details: pointer to command details structure or NULL
4071 **/
4072 int
4073 i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4074 u16 seid,
4075 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4076 struct i40e_asq_cmd_details *cmd_details)
4077 {
4078 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4079 i40e_aqc_opc_query_switching_comp_ets_config,
4080 cmd_details);
4081 }
4082
4083 /**
4084 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4085 * @hw: pointer to the hw struct
4086 * @seid: seid of the VSI or switching component connected to Physical Port
4087 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4088 * @cmd_details: pointer to command details structure or NULL
4089 **/
4090 int
4091 i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4092 u16 seid,
4093 struct i40e_aqc_query_port_ets_config_resp *bw_data,
4094 struct i40e_asq_cmd_details *cmd_details)
4095 {
4096 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4097 i40e_aqc_opc_query_port_ets_config,
4098 cmd_details);
4099 }
4100
4101 /**
4102 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4103 * @hw: pointer to the hw struct
4104 * @seid: seid of the switching component
4105 * @bw_data: Buffer to hold switching component's BW configuration
4106 * @cmd_details: pointer to command details structure or NULL
4107 **/
4108 int
4109 i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4110 u16 seid,
4111 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4112 struct i40e_asq_cmd_details *cmd_details)
4113 {
4114 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4115 i40e_aqc_opc_query_switching_comp_bw_config,
4116 cmd_details);
4117 }
4118
4119 /**
4120 * i40e_validate_filter_settings
4121 * @hw: pointer to the hardware structure
4122 * @settings: Filter control settings
4123 *
4124 * Check and validate the filter control settings passed.
4125 * The function checks for the valid filter/context sizes being
4126 * passed for FCoE and PE.
4127 *
4128 * Returns 0 if the values passed are valid and within
4129 * range; otherwise returns an error.
4130 **/
4131 static int
4132 i40e_validate_filter_settings(struct i40e_hw *hw,
4133 struct i40e_filter_control_settings *settings)
4134 {
4135 u32 fcoe_cntx_size, fcoe_filt_size;
4136 u32 fcoe_fmax;
4137 u32 val;
4138
4139 /* Validate FCoE settings passed */
4140 switch (settings->fcoe_filt_num) {
4141 case I40E_HASH_FILTER_SIZE_1K:
4142 case I40E_HASH_FILTER_SIZE_2K:
4143 case I40E_HASH_FILTER_SIZE_4K:
4144 case I40E_HASH_FILTER_SIZE_8K:
4145 case I40E_HASH_FILTER_SIZE_16K:
4146 case I40E_HASH_FILTER_SIZE_32K:
4147 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4148 fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4149 break;
4150 default:
4151 return -EINVAL;
4152 }
4153
4154 switch (settings->fcoe_cntx_num) {
4155 case I40E_DMA_CNTX_SIZE_512:
4156 case I40E_DMA_CNTX_SIZE_1K:
4157 case I40E_DMA_CNTX_SIZE_2K:
4158 case I40E_DMA_CNTX_SIZE_4K:
4159 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4160 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4161 break;
4162 default:
4163 return -EINVAL;
4164 }
4165
4166 /* Validate PE settings passed */
4167 switch (settings->pe_filt_num) {
4168 case I40E_HASH_FILTER_SIZE_1K:
4169 case I40E_HASH_FILTER_SIZE_2K:
4170 case I40E_HASH_FILTER_SIZE_4K:
4171 case I40E_HASH_FILTER_SIZE_8K:
4172 case I40E_HASH_FILTER_SIZE_16K:
4173 case I40E_HASH_FILTER_SIZE_32K:
4174 case I40E_HASH_FILTER_SIZE_64K:
4175 case I40E_HASH_FILTER_SIZE_128K:
4176 case I40E_HASH_FILTER_SIZE_256K:
4177 case I40E_HASH_FILTER_SIZE_512K:
4178 case I40E_HASH_FILTER_SIZE_1M:
4179 break;
4180 default:
4181 return -EINVAL;
4182 }
4183
4184 switch (settings->pe_cntx_num) {
4185 case I40E_DMA_CNTX_SIZE_512:
4186 case I40E_DMA_CNTX_SIZE_1K:
4187 case I40E_DMA_CNTX_SIZE_2K:
4188 case I40E_DMA_CNTX_SIZE_4K:
4189 case I40E_DMA_CNTX_SIZE_8K:
4190 case I40E_DMA_CNTX_SIZE_16K:
4191 case I40E_DMA_CNTX_SIZE_32K:
4192 case I40E_DMA_CNTX_SIZE_64K:
4193 case I40E_DMA_CNTX_SIZE_128K:
4194 case I40E_DMA_CNTX_SIZE_256K:
4195 break;
4196 default:
4197 return -EINVAL;
4198 }
4199
4200 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4201 val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4202 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
4203 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
4204 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
4205 return -EINVAL;
4206
4207 return 0;
4208 }
4209
4210 /**
4211 * i40e_set_filter_control
4212 * @hw: pointer to the hardware structure
4213 * @settings: Filter control settings
4214 *
4215 * Set the Queue Filters for PE/FCoE and enable filters required
4216 * for a single PF. It is expected that these settings are programmed
4217 * at driver initialization time.
4218 **/
4219 int i40e_set_filter_control(struct i40e_hw *hw,
4220 struct i40e_filter_control_settings *settings)
4221 {
4222 u32 hash_lut_size = 0;
4223 int ret = 0;
4224 u32 val;
4225
4226 if (!settings)
4227 return -EINVAL;
4228
4229 /* Validate the input settings */
4230 ret = i40e_validate_filter_settings(hw, settings);
4231 if (ret)
4232 return ret;
4233
4234 /* Read the PF Queue Filter control register */
4235 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4236
4237 /* Program required PE hash buckets for the PF */
4238 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4239 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
4240 I40E_PFQF_CTL_0_PEHSIZE_MASK;
4241 /* Program required PE contexts for the PF */
4242 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4243 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
4244 I40E_PFQF_CTL_0_PEDSIZE_MASK;
4245
4246 /* Program required FCoE hash buckets for the PF */
4247 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4248 val |= ((u32)settings->fcoe_filt_num <<
4249 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
4250 I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4251 /* Program required FCoE DDP contexts for the PF */
4252 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4253 val |= ((u32)settings->fcoe_cntx_num <<
4254 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
4255 I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4256
4257 /* Program Hash LUT size for the PF */
4258 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4259 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4260 hash_lut_size = 1;
4261 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
4262 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4263
4264 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4265 if (settings->enable_fdir)
4266 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4267 if (settings->enable_ethtype)
4268 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4269 if (settings->enable_macvlan)
4270 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4271
4272 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4273
4274 return 0;
4275 }
4276
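/* Usage sketch (illustrative only): the kind of settings block a PF would
 * hand to i40e_set_filter_control() during initialization, enabling the
 * Flow Director, ethertype and MACVLAN filters with a 128-entry hash LUT.
 * The helper name is an assumption; the real driver builds an equivalent
 * structure in its PF setup path.
 */
static int __maybe_unused example_setup_filter_control(struct i40e_hw *hw)
{
	struct i40e_filter_control_settings settings = {};

	settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
	settings.enable_fdir = true;
	settings.enable_ethtype = true;
	settings.enable_macvlan = true;

	return i40e_set_filter_control(hw, &settings);
}
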
4277 /**
4278 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4279 * @hw: pointer to the hw struct
4280 * @mac_addr: MAC address to use in the filter
4281 * @ethtype: Ethertype to use in the filter
4282 * @flags: Flags that needs to be applied to the filter
4283 * @vsi_seid: seid of the control VSI
4284 * @queue: VSI queue number to send the packet to
4285 * @is_add: Add control packet filter if True else remove
4286 * @stats: Structure to hold information on control filter counts
4287 * @cmd_details: pointer to command details structure or NULL
4288 *
4289 * This command adds or removes a control packet filter for a control VSI.
4290 * On return it updates the used and free perfect filter counts in the
4291 * stats member.
4292 **/
4293 int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4294 u8 *mac_addr, u16 ethtype, u16 flags,
4295 u16 vsi_seid, u16 queue, bool is_add,
4296 struct i40e_control_filter_stats *stats,
4297 struct i40e_asq_cmd_details *cmd_details)
4298 {
4299 struct i40e_aq_desc desc;
4300 struct i40e_aqc_add_remove_control_packet_filter *cmd =
4301 (struct i40e_aqc_add_remove_control_packet_filter *)
4302 &desc.params.raw;
4303 struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4304 (struct i40e_aqc_add_remove_control_packet_filter_completion *)
4305 &desc.params.raw;
4306 int status;
4307
4308 if (vsi_seid == 0)
4309 return -EINVAL;
4310
4311 if (is_add) {
4312 i40e_fill_default_direct_cmd_desc(&desc,
4313 i40e_aqc_opc_add_control_packet_filter);
4314 cmd->queue = cpu_to_le16(queue);
4315 } else {
4316 i40e_fill_default_direct_cmd_desc(&desc,
4317 i40e_aqc_opc_remove_control_packet_filter);
4318 }
4319
4320 if (mac_addr)
4321 ether_addr_copy(cmd->mac, mac_addr);
4322
4323 cmd->etype = cpu_to_le16(ethtype);
4324 cmd->flags = cpu_to_le16(flags);
4325 cmd->seid = cpu_to_le16(vsi_seid);
4326
4327 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4328
4329 if (!status && stats) {
4330 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4331 stats->etype_used = le16_to_cpu(resp->etype_used);
4332 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4333 stats->etype_free = le16_to_cpu(resp->etype_free);
4334 }
4335
4336 return status;
4337 }
4338
4339 /**
4340 * i40e_add_filter_to_drop_tx_flow_control_frames - drop Tx flow control frames
4341 * @hw: pointer to the hw struct
4342 * @seid: VSI seid on which to add the ethertype filter
4343 **/
4344 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4345 u16 seid)
4346 {
4347 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4348 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4349 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4350 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4351 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4352 int status;
4353
4354 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4355 seid, 0, true, NULL,
4356 NULL);
4357 if (status)
4358 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4359 }
4360
4361 /**
4362 * i40e_aq_alternate_read
4363 * @hw: pointer to the hardware structure
4364 * @reg_addr0: address of first dword to be read
4365 * @reg_val0: pointer for data read from 'reg_addr0'
4366 * @reg_addr1: address of second dword to be read
4367 * @reg_val1: pointer for data read from 'reg_addr1'
4368 *
4369 * Read one or two dwords from alternate structure. Fields are indicated
4370 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
4371 * is not passed, then only the register at 'reg_addr0' is read.
4372 *
4373 **/
4374 static int i40e_aq_alternate_read(struct i40e_hw *hw,
4375 u32 reg_addr0, u32 *reg_val0,
4376 u32 reg_addr1, u32 *reg_val1)
4377 {
4378 struct i40e_aq_desc desc;
4379 struct i40e_aqc_alternate_write *cmd_resp =
4380 (struct i40e_aqc_alternate_write *)&desc.params.raw;
4381 int status;
4382
4383 if (!reg_val0)
4384 return -EINVAL;
4385
4386 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4387 cmd_resp->address0 = cpu_to_le32(reg_addr0);
4388 cmd_resp->address1 = cpu_to_le32(reg_addr1);
4389
4390 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4391
4392 if (!status) {
4393 *reg_val0 = le32_to_cpu(cmd_resp->data0);
4394
4395 if (reg_val1)
4396 *reg_val1 = le32_to_cpu(cmd_resp->data1);
4397 }
4398
4399 return status;
4400 }
4401
4402 /**
4403 * i40e_aq_suspend_port_tx
4404 * @hw: pointer to the hardware structure
4405 * @seid: port seid
4406 * @cmd_details: pointer to command details structure or NULL
4407 *
4408 * Suspend port's Tx traffic
4409 **/
4410 int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
4411 struct i40e_asq_cmd_details *cmd_details)
4412 {
4413 struct i40e_aqc_tx_sched_ind *cmd;
4414 struct i40e_aq_desc desc;
4415 int status;
4416
4417 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
4418 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
4419 cmd->vsi_seid = cpu_to_le16(seid);
4420 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4421
4422 return status;
4423 }
4424
4425 /**
4426 * i40e_aq_resume_port_tx
4427 * @hw: pointer to the hardware structure
4428 * @cmd_details: pointer to command details structure or NULL
4429 *
4430 * Resume port's Tx traffic
4431 **/
4432 int i40e_aq_resume_port_tx(struct i40e_hw *hw,
4433 struct i40e_asq_cmd_details *cmd_details)
4434 {
4435 struct i40e_aq_desc desc;
4436 int status;
4437
4438 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4439
4440 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4441
4442 return status;
4443 }
4444
4445 /**
4446 * i40e_set_pci_config_data - store PCI bus info
4447 * @hw: pointer to hardware structure
4448 * @link_status: the link status word from PCI config space
4449 *
4450 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4451 **/
4452 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4453 {
4454 hw->bus.type = i40e_bus_type_pci_express;
4455
4456 switch (link_status & PCI_EXP_LNKSTA_NLW) {
4457 case PCI_EXP_LNKSTA_NLW_X1:
4458 hw->bus.width = i40e_bus_width_pcie_x1;
4459 break;
4460 case PCI_EXP_LNKSTA_NLW_X2:
4461 hw->bus.width = i40e_bus_width_pcie_x2;
4462 break;
4463 case PCI_EXP_LNKSTA_NLW_X4:
4464 hw->bus.width = i40e_bus_width_pcie_x4;
4465 break;
4466 case PCI_EXP_LNKSTA_NLW_X8:
4467 hw->bus.width = i40e_bus_width_pcie_x8;
4468 break;
4469 default:
4470 hw->bus.width = i40e_bus_width_unknown;
4471 break;
4472 }
4473
4474 switch (link_status & PCI_EXP_LNKSTA_CLS) {
4475 case PCI_EXP_LNKSTA_CLS_2_5GB:
4476 hw->bus.speed = i40e_bus_speed_2500;
4477 break;
4478 case PCI_EXP_LNKSTA_CLS_5_0GB:
4479 hw->bus.speed = i40e_bus_speed_5000;
4480 break;
4481 case PCI_EXP_LNKSTA_CLS_8_0GB:
4482 hw->bus.speed = i40e_bus_speed_8000;
4483 break;
4484 default:
4485 hw->bus.speed = i40e_bus_speed_unknown;
4486 break;
4487 }
4488 }
4489
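/* Usage sketch (illustrative only): where the link status word comes from.
 * The probe path reads PCI_EXP_LNKSTA from the device's PCIe capability and
 * feeds it to i40e_set_pci_config_data(); the helper name and the pdev
 * parameter are assumptions for the example.
 */
static void __maybe_unused example_cache_pcie_link_info(struct i40e_hw *hw,
							struct pci_dev *pdev)
{
	u16 link_status;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
	i40e_set_pci_config_data(hw, link_status);
}
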
4490 /**
4491 * i40e_aq_debug_dump
4492 * @hw: pointer to the hardware structure
4493 * @cluster_id: specific cluster to dump
4494 * @table_id: table id within cluster
4495 * @start_index: index of line in the block to read
4496 * @buff_size: dump buffer size
4497 * @buff: dump buffer
4498 * @ret_buff_size: actual buffer size returned
4499 * @ret_next_table: next block to read
4500 * @ret_next_index: next index to read
4501 * @cmd_details: pointer to command details structure or NULL
4502 *
4503 * Dump internal FW/HW data for debug purposes.
4504 *
4505 **/
4506 int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4507 u8 table_id, u32 start_index, u16 buff_size,
4508 void *buff, u16 *ret_buff_size,
4509 u8 *ret_next_table, u32 *ret_next_index,
4510 struct i40e_asq_cmd_details *cmd_details)
4511 {
4512 struct i40e_aq_desc desc;
4513 struct i40e_aqc_debug_dump_internals *cmd =
4514 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4515 struct i40e_aqc_debug_dump_internals *resp =
4516 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4517 int status;
4518
4519 if (buff_size == 0 || !buff)
4520 return -EINVAL;
4521
4522 i40e_fill_default_direct_cmd_desc(&desc,
4523 i40e_aqc_opc_debug_dump_internals);
4524 /* Indirect Command */
4525 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4526 if (buff_size > I40E_AQ_LARGE_BUF)
4527 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4528
4529 cmd->cluster_id = cluster_id;
4530 cmd->table_id = table_id;
4531 cmd->idx = cpu_to_le32(start_index);
4532
4533 desc.datalen = cpu_to_le16(buff_size);
4534
4535 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4536 if (!status) {
4537 if (ret_buff_size)
4538 *ret_buff_size = le16_to_cpu(desc.datalen);
4539 if (ret_next_table)
4540 *ret_next_table = resp->table_id;
4541 if (ret_next_index)
4542 *ret_next_index = le32_to_cpu(resp->idx);
4543 }
4544
4545 return status;
4546 }
4547
4548 /**
4549 * i40e_read_bw_from_alt_ram
4550 * @hw: pointer to the hardware structure
4551 * @max_bw: pointer for max_bw read
4552 * @min_bw: pointer for min_bw read
4553 * @min_valid: pointer for bool that is true if min_bw is a valid value
4554 * @max_valid: pointer for bool that is true if max_bw is a valid value
4555 *
4556 * Read BW from the alternate RAM for the given PF
4557 **/
4558 int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4559 u32 *max_bw, u32 *min_bw,
4560 bool *min_valid, bool *max_valid)
4561 {
4562 u32 max_bw_addr, min_bw_addr;
4563 int status;
4564
4565 /* Calculate the address of the min/max bw registers */
4566 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4567 I40E_ALT_STRUCT_MAX_BW_OFFSET +
4568 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4569 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4570 I40E_ALT_STRUCT_MIN_BW_OFFSET +
4571 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4572
4573 /* Read the bandwidths from alt ram */
4574 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4575 min_bw_addr, min_bw);
4576
4577 if (*min_bw & I40E_ALT_BW_VALID_MASK)
4578 *min_valid = true;
4579 else
4580 *min_valid = false;
4581
4582 if (*max_bw & I40E_ALT_BW_VALID_MASK)
4583 *max_valid = true;
4584 else
4585 *max_valid = false;
4586
4587 return status;
4588 }
4589
4590 /**
4591 * i40e_aq_configure_partition_bw
4592 * @hw: pointer to the hardware structure
4593 * @bw_data: Buffer holding valid pfs and bw limits
4594 * @cmd_details: pointer to command details
4595 *
4596 * Configure partitions guaranteed/max bw
4597 **/
4598 int
4599 i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4600 struct i40e_aqc_configure_partition_bw_data *bw_data,
4601 struct i40e_asq_cmd_details *cmd_details)
4602 {
4603 u16 bwd_size = sizeof(*bw_data);
4604 struct i40e_aq_desc desc;
4605 int status;
4606
4607 i40e_fill_default_direct_cmd_desc(&desc,
4608 i40e_aqc_opc_configure_partition_bw);
4609
4610 /* Indirect command */
4611 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4612 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4613
4614 if (bwd_size > I40E_AQ_LARGE_BUF)
4615 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4616
4617 desc.datalen = cpu_to_le16(bwd_size);
4618
4619 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4620 cmd_details);
4621
4622 return status;
4623 }
4624
4625 /**
4626 * i40e_read_phy_register_clause22
4627 * @hw: pointer to the HW structure
4628 * @reg: register address in the page
4629 * @phy_addr: PHY address on MDIO interface
4630 * @value: PHY register value
4631 *
4632 * Reads specified PHY register value
4633 **/
4634 int i40e_read_phy_register_clause22(struct i40e_hw *hw,
4635 u16 reg, u8 phy_addr, u16 *value)
4636 {
4637 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4638 int status = -EIO;
4639 u32 command = 0;
4640 u16 retry = 1000;
4641
4642 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4643 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4644 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4645 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4646 (I40E_GLGEN_MSCA_MDICMD_MASK);
4647 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4648 do {
4649 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4650 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4651 status = 0;
4652 break;
4653 }
4654 udelay(10);
4655 retry--;
4656 } while (retry);
4657
4658 if (status) {
4659 i40e_debug(hw, I40E_DEBUG_PHY,
4660 "PHY: Can't write command to external PHY.\n");
4661 } else {
4662 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4663 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4664 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4665 }
4666
4667 return status;
4668 }
4669
4670 /**
4671 * i40e_write_phy_register_clause22
4672 * @hw: pointer to the HW structure
4673 * @reg: register address in the page
4674 * @phy_addr: PHY address on MDIO interface
4675 * @value: PHY register value
4676 *
4677 * Writes specified PHY register value
4678 **/
4679 int i40e_write_phy_register_clause22(struct i40e_hw *hw,
4680 u16 reg, u8 phy_addr, u16 value)
4681 {
4682 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4683 int status = -EIO;
4684 u32 command = 0;
4685 u16 retry = 1000;
4686
4687 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4688 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4689
4690 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4691 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4692 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4693 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4694 (I40E_GLGEN_MSCA_MDICMD_MASK);
4695
4696 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4697 do {
4698 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4699 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4700 status = 0;
4701 break;
4702 }
4703 udelay(10);
4704 retry--;
4705 } while (retry);
4706
4707 return status;
4708 }
4709
4710 /**
4711 * i40e_read_phy_register_clause45
4712 * @hw: pointer to the HW structure
4713 * @page: registers page number
4714 * @reg: register address in the page
4715 * @phy_addr: PHY address on MDIO interface
4716 * @value: PHY register value
4717 *
4718 * Reads specified PHY register value
4719 **/
4720 int i40e_read_phy_register_clause45(struct i40e_hw *hw,
4721 u8 page, u16 reg, u8 phy_addr, u16 *value)
4722 {
4723 u8 port_num = hw->func_caps.mdio_port_num;
4724 int status = -EIO;
4725 u32 command = 0;
4726 u16 retry = 1000;
4727
4728 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4729 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4730 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4731 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4732 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4733 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4734 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4735 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4736 do {
4737 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4738 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4739 status = 0;
4740 break;
4741 }
4742 usleep_range(10, 20);
4743 retry--;
4744 } while (retry);
4745
4746 if (status) {
4747 i40e_debug(hw, I40E_DEBUG_PHY,
4748 "PHY: Can't write command to external PHY.\n");
4749 goto phy_read_end;
4750 }
4751
4752 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4753 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4754 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4755 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4756 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4757 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4758 status = -EIO;
4759 retry = 1000;
4760 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4761 do {
4762 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4763 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4764 status = 0;
4765 break;
4766 }
4767 usleep_range(10, 20);
4768 retry--;
4769 } while (retry);
4770
4771 if (!status) {
4772 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4773 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4774 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4775 } else {
4776 i40e_debug(hw, I40E_DEBUG_PHY,
4777 "PHY: Can't read register value from external PHY.\n");
4778 }
4779
4780 phy_read_end:
4781 return status;
4782 }
4783
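/* Usage sketch (illustrative only): reading one clause-45 register from the
 * external PHY behind the port's MDIO bus.  MDIO_MMD_PMAPMD is assumed to be
 * available from <linux/mdio.h>; the register number 0x0007 and the helper
 * name are assumptions for the example.  i40e_get_phy_address() resolves the
 * PHY address for the given port.
 */
static int __maybe_unused example_read_pma_register(struct i40e_hw *hw,
						    u16 *value)
{
	u8 phy_addr = i40e_get_phy_address(hw, hw->port);

	return i40e_read_phy_register_clause45(hw, MDIO_MMD_PMAPMD,
					       0x0007, phy_addr, value);
}
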
4784 /**
4785 * i40e_write_phy_register_clause45
4786 * @hw: pointer to the HW structure
4787 * @page: registers page number
4788 * @reg: register address in the page
4789 * @phy_addr: PHY address on MDIO interface
4790 * @value: PHY register value
4791 *
4792 * Writes value to specified PHY register
4793 **/
4794 int i40e_write_phy_register_clause45(struct i40e_hw *hw,
4795 u8 page, u16 reg, u8 phy_addr, u16 value)
4796 {
4797 u8 port_num = hw->func_caps.mdio_port_num;
4798 int status = -EIO;
4799 u16 retry = 1000;
4800 u32 command = 0;
4801
4802 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4803 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4804 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4805 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4806 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4807 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4808 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4809 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4810 do {
4811 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4812 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4813 status = 0;
4814 break;
4815 }
4816 usleep_range(10, 20);
4817 retry--;
4818 } while (retry);
4819 if (status) {
4820 i40e_debug(hw, I40E_DEBUG_PHY,
4821 "PHY: Can't write command to external PHY.\n");
4822 goto phy_write_end;
4823 }
4824
4825 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4826 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4827
4828 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4829 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4830 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4831 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4832 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4833 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4834 status = -EIO;
4835 retry = 1000;
4836 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4837 do {
4838 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4839 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4840 status = 0;
4841 break;
4842 }
4843 usleep_range(10, 20);
4844 retry--;
4845 } while (retry);
4846
4847 phy_write_end:
4848 return status;
4849 }
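
/*
 * Usage sketch only (not part of the upstream driver): clause 45 accesses
 * take a device/page number in addition to the register address.  This
 * hypothetical helper clears selected bits in an external PHY register
 * using the clause 45 read and write helpers above.
 */
static __maybe_unused int
i40e_example_clause45_clear_bits(struct i40e_hw *hw, u8 page, u16 reg,
				 u8 phy_addr, u16 clear_bits)
{
	u16 val;
	int status;

	status = i40e_read_phy_register_clause45(hw, page, reg, phy_addr,
						 &val);
	if (status)
		return status;

	return i40e_write_phy_register_clause45(hw, page, reg, phy_addr,
						val & ~clear_bits);
}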
4850
4851 /**
4852 * i40e_write_phy_register
4853 * @hw: pointer to the HW structure
4854 * @page: registers page number
4855 * @reg: register address in the page
4856 * @phy_addr: PHY address on MDIO interface
4857 * @value: PHY register value
4858 *
4859 * Writes value to specified PHY register
4860 **/
4861 int i40e_write_phy_register(struct i40e_hw *hw,
4862 u8 page, u16 reg, u8 phy_addr, u16 value)
4863 {
4864 int status;
4865
4866 switch (hw->device_id) {
4867 case I40E_DEV_ID_1G_BASE_T_X722:
4868 status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
4869 value);
4870 break;
4871 case I40E_DEV_ID_1G_BASE_T_BC:
4872 case I40E_DEV_ID_5G_BASE_T_BC:
4873 case I40E_DEV_ID_10G_BASE_T:
4874 case I40E_DEV_ID_10G_BASE_T4:
4875 case I40E_DEV_ID_10G_BASE_T_BC:
4876 case I40E_DEV_ID_10G_BASE_T_X722:
4877 case I40E_DEV_ID_25G_B:
4878 case I40E_DEV_ID_25G_SFP28:
4879 status = i40e_write_phy_register_clause45(hw, page, reg,
4880 phy_addr, value);
4881 break;
4882 default:
4883 status = -EIO;
4884 break;
4885 }
4886
4887 return status;
4888 }
4889
4890 /**
4891 * i40e_read_phy_register
4892 * @hw: pointer to the HW structure
4893 * @page: registers page number
4894 * @reg: register address in the page
4895 * @phy_addr: PHY address on MDIO interface
4896 * @value: PHY register value
4897 *
4898 * Reads specified PHY register value
4899 **/
4900 int i40e_read_phy_register(struct i40e_hw *hw,
4901 u8 page, u16 reg, u8 phy_addr, u16 *value)
4902 {
4903 int status;
4904
4905 switch (hw->device_id) {
4906 case I40E_DEV_ID_1G_BASE_T_X722:
4907 status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
4908 value);
4909 break;
4910 case I40E_DEV_ID_1G_BASE_T_BC:
4911 case I40E_DEV_ID_5G_BASE_T_BC:
4912 case I40E_DEV_ID_10G_BASE_T:
4913 case I40E_DEV_ID_10G_BASE_T4:
4914 case I40E_DEV_ID_10G_BASE_T_BC:
4915 case I40E_DEV_ID_10G_BASE_T_X722:
4916 case I40E_DEV_ID_25G_B:
4917 case I40E_DEV_ID_25G_SFP28:
4918 status = i40e_read_phy_register_clause45(hw, page, reg,
4919 phy_addr, value);
4920 break;
4921 default:
4922 status = -EIO;
4923 break;
4924 }
4925
4926 return status;
4927 }
4928
4929 /**
4930 * i40e_get_phy_address
4931 * @hw: pointer to the HW structure
4932 * @dev_num: PHY port number whose address we want
4933 *
4934 * Gets PHY address for current port
4935 **/
4936 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
4937 {
4938 u8 port_num = hw->func_caps.mdio_port_num;
4939 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
4940
4941 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
4942 }
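
/*
 * Usage sketch only (not part of the upstream driver): resolve the MDIO
 * address of the PHY behind the current port and read one of its
 * registers through the clause-agnostic wrapper above.  The port lookup
 * mirrors the one done in i40e_blink_phy_link_led() below; the helper
 * name is hypothetical.
 */
static __maybe_unused int
i40e_example_read_port_phy_reg(struct i40e_hw *hw, u8 page, u16 reg,
			       u16 *val)
{
	u32 portnum = rd32(hw, I40E_PFGEN_PORTNUM);
	u8 port_num = (u8)(portnum & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
	u8 phy_addr = i40e_get_phy_address(hw, port_num);

	return i40e_read_phy_register(hw, page, reg, phy_addr, val);
}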
4943
4944 /**
4945 * i40e_blink_phy_link_led
4946 * @hw: pointer to the HW structure
4947 * @time: time in seconds for which the LED will blink
4948 * @interval: gap between LED on and off in msecs
4949 *
4950 * Blinks PHY link LED
4951 **/
4952 int i40e_blink_phy_link_led(struct i40e_hw *hw,
4953 u32 time, u32 interval)
4954 {
4955 u16 led_addr = I40E_PHY_LED_PROV_REG_1;
4956 u16 gpio_led_port;
4957 u8 phy_addr = 0;
4958 int status = 0;
4959 u16 led_ctl;
4960 u8 port_num;
4961 u16 led_reg;
4962 u32 i;
4963
4964 i = rd32(hw, I40E_PFGEN_PORTNUM);
4965 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4966 phy_addr = i40e_get_phy_address(hw, port_num);
4967
4968 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
4969 led_addr++) {
4970 status = i40e_read_phy_register_clause45(hw,
4971 I40E_PHY_COM_REG_PAGE,
4972 led_addr, phy_addr,
4973 &led_reg);
4974 if (status)
4975 goto phy_blinking_end;
4976 led_ctl = led_reg;
4977 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
4978 led_reg = 0;
4979 status = i40e_write_phy_register_clause45(hw,
4980 I40E_PHY_COM_REG_PAGE,
4981 led_addr, phy_addr,
4982 led_reg);
4983 if (status)
4984 goto phy_blinking_end;
4985 break;
4986 }
4987 }
4988
4989 if (time > 0 && interval > 0) {
4990 for (i = 0; i < time * 1000; i += interval) {
4991 status = i40e_read_phy_register_clause45(hw,
4992 I40E_PHY_COM_REG_PAGE,
4993 led_addr, phy_addr, &led_reg);
4994 if (status)
4995 goto restore_config;
4996 if (led_reg & I40E_PHY_LED_MANUAL_ON)
4997 led_reg = 0;
4998 else
4999 led_reg = I40E_PHY_LED_MANUAL_ON;
5000 status = i40e_write_phy_register_clause45(hw,
5001 I40E_PHY_COM_REG_PAGE,
5002 led_addr, phy_addr, led_reg);
5003 if (status)
5004 goto restore_config;
5005 msleep(interval);
5006 }
5007 }
5008
5009 restore_config:
5010 status = i40e_write_phy_register_clause45(hw,
5011 I40E_PHY_COM_REG_PAGE,
5012 led_addr, phy_addr, led_ctl);
5013
5014 phy_blinking_end:
5015 return status;
5016 }
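
/*
 * Usage sketch only (not part of the upstream driver): blink the port LED
 * for five seconds, toggling every 500 ms.  The time and interval values
 * are illustrative and the helper name is hypothetical.
 */
static __maybe_unused void i40e_example_blink_led(struct i40e_hw *hw)
{
	int status = i40e_blink_phy_link_led(hw, 5, 500);

	if (status)
		i40e_debug(hw, I40E_DEBUG_PHY,
			   "LED blink failed, status %d\n", status);
}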
5017
5018 /**
5019 * i40e_led_get_reg - read LED register
5020 * @hw: pointer to the HW structure
5021 * @led_addr: LED register address
5022 * @reg_val: read register value
5023 **/
5024 static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
5025 u32 *reg_val)
5026 {
5027 u8 phy_addr = 0;
5028 u8 port_num;
5029 int status;
5030 u32 i;
5031
5032 *reg_val = 0;
5033 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5034 status =
5035 i40e_aq_get_phy_register(hw,
5036 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5037 I40E_PHY_COM_REG_PAGE, true,
5038 I40E_PHY_LED_PROV_REG_1,
5039 reg_val, NULL);
5040 } else {
5041 i = rd32(hw, I40E_PFGEN_PORTNUM);
5042 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5043 phy_addr = i40e_get_phy_address(hw, port_num);
5044 status = i40e_read_phy_register_clause45(hw,
5045 I40E_PHY_COM_REG_PAGE,
5046 led_addr, phy_addr,
5047 (u16 *)reg_val);
5048 }
5049 return status;
5050 }
5051
5052 /**
5053 * i40e_led_set_reg - write LED register
5054 * @hw: pointer to the HW structure
5055 * @led_addr: LED register address
5056 * @reg_val: register value to write
5057 **/
5058 static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
5059 u32 reg_val)
5060 {
5061 u8 phy_addr = 0;
5062 u8 port_num;
5063 int status;
5064 u32 i;
5065
5066 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5067 status =
5068 i40e_aq_set_phy_register(hw,
5069 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5070 I40E_PHY_COM_REG_PAGE, true,
5071 I40E_PHY_LED_PROV_REG_1,
5072 reg_val, NULL);
5073 } else {
5074 i = rd32(hw, I40E_PFGEN_PORTNUM);
5075 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5076 phy_addr = i40e_get_phy_address(hw, port_num);
5077 status = i40e_write_phy_register_clause45(hw,
5078 I40E_PHY_COM_REG_PAGE,
5079 led_addr, phy_addr,
5080 (u16)reg_val);
5081 }
5082
5083 return status;
5084 }
5085
5086 /**
5087 * i40e_led_get_phy - return current on/off mode
5088 * @hw: pointer to the hw struct
5089 * @led_addr: address of led register to use
5090 * @val: original value of register to use
5091 *
5092 **/
5093 int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5094 u16 *val)
5095 {
5096 u16 gpio_led_port;
5097 u8 phy_addr = 0;
5098 u32 reg_val_aq;
5099 int status = 0;
5100 u16 temp_addr;
5101 u16 reg_val;
5102 u8 port_num;
5103 u32 i;
5104
5105 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5106 status =
5107 i40e_aq_get_phy_register(hw,
5108 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5109 I40E_PHY_COM_REG_PAGE, true,
5110 I40E_PHY_LED_PROV_REG_1,
5111 &reg_val_aq, NULL);
5112 if (status == 0)
5113 *val = (u16)reg_val_aq;
5114 return status;
5115 }
5116 temp_addr = I40E_PHY_LED_PROV_REG_1;
5117 i = rd32(hw, I40E_PFGEN_PORTNUM);
5118 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5119 phy_addr = i40e_get_phy_address(hw, port_num);
5120
5121 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5122 temp_addr++) {
5123 status = i40e_read_phy_register_clause45(hw,
5124 I40E_PHY_COM_REG_PAGE,
5125 temp_addr, phy_addr,
5126 &reg_val);
5127 if (status)
5128 return status;
5129 *val = reg_val;
5130 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5131 *led_addr = temp_addr;
5132 break;
5133 }
5134 }
5135 return status;
5136 }
5137
5138 /**
5139 * i40e_led_set_phy
5140 * @hw: pointer to the HW structure
5141 * @on: true or false
5142 * @led_addr: address of led register to use
5143 * @mode: original val plus bit for set or ignore
5144 *
5145 * Set LEDs on or off when controlled by the PHY
5146 *
5147 **/
5148 int i40e_led_set_phy(struct i40e_hw *hw, bool on,
5149 u16 led_addr, u32 mode)
5150 {
5151 u32 led_ctl = 0;
5152 u32 led_reg = 0;
5153 int status = 0;
5154
5155 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5156 if (status)
5157 return status;
5158 led_ctl = led_reg;
5159 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5160 led_reg = 0;
5161 status = i40e_led_set_reg(hw, led_addr, led_reg);
5162 if (status)
5163 return status;
5164 }
5165 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5166 if (status)
5167 goto restore_config;
5168 if (on)
5169 led_reg = I40E_PHY_LED_MANUAL_ON;
5170 else
5171 led_reg = 0;
5172
5173 status = i40e_led_set_reg(hw, led_addr, led_reg);
5174 if (status)
5175 goto restore_config;
5176 if (mode & I40E_PHY_LED_MODE_ORIG) {
5177 led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5178 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5179 }
5180 return status;
5181
5182 restore_config:
5183 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5184 return status;
5185 }
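
/*
 * Usage sketch only (not part of the upstream driver): the usual pattern
 * for PHY-controlled LED identification is to save the original LED
 * register with i40e_led_get_phy(), force the LED state with
 * i40e_led_set_phy() and a mode of 0, and later restore the saved value
 * by passing it back with I40E_PHY_LED_MODE_ORIG set.  The helper name is
 * hypothetical.
 */
static __maybe_unused int i40e_example_identify_port(struct i40e_hw *hw,
						     bool on)
{
	u16 led_addr = 0;
	u16 orig_val = 0;
	int status;

	status = i40e_led_get_phy(hw, &led_addr, &orig_val);
	if (status)
		return status;

	/* force the LED on or off without reapplying the saved mode yet */
	status = i40e_led_set_phy(hw, on, led_addr, 0);
	if (status)
		return status;

	/* put the original LED configuration back (a real caller would do
	 * this once identification is finished)
	 */
	return i40e_led_set_phy(hw, false, led_addr,
				orig_val | I40E_PHY_LED_MODE_ORIG);
}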
5186
5187 /**
5188 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5189 * @hw: pointer to the hw struct
5190 * @reg_addr: register address
5191 * @reg_val: ptr to register value
5192 * @cmd_details: pointer to command details structure or NULL
5193 *
5194 * Use the firmware to read the Rx control register,
5195 * especially useful if the Rx unit is under heavy pressure
5196 **/
5197 int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5198 u32 reg_addr, u32 *reg_val,
5199 struct i40e_asq_cmd_details *cmd_details)
5200 {
5201 struct i40e_aq_desc desc;
5202 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5203 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5204 int status;
5205
5206 if (!reg_val)
5207 return -EINVAL;
5208
5209 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5210
5211 cmd_resp->address = cpu_to_le32(reg_addr);
5212
5213 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5214
5215 if (status == 0)
5216 *reg_val = le32_to_cpu(cmd_resp->value);
5217
5218 return status;
5219 }
5220
5221 /**
5222 * i40e_read_rx_ctl - read from an Rx control register
5223 * @hw: pointer to the hw struct
5224 * @reg_addr: register address
5225 **/
5226 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5227 {
5228 bool use_register;
5229 int status = 0;
5230 int retry = 5;
5231 u32 val = 0;
5232
5233 use_register = (((hw->aq.api_maj_ver == 1) &&
5234 (hw->aq.api_min_ver < 5)) ||
5235 (hw->mac.type == I40E_MAC_X722));
5236 if (!use_register) {
5237 do_retry:
5238 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5239 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5240 usleep_range(1000, 2000);
5241 retry--;
5242 goto do_retry;
5243 }
5244 }
5245
5246 /* if the AQ access failed, try the old-fashioned way */
5247 if (status || use_register)
5248 val = rd32(hw, reg_addr);
5249
5250 return val;
5251 }
5252
5253 /**
5254 * i40e_aq_rx_ctl_write_register
5255 * @hw: pointer to the hw struct
5256 * @reg_addr: register address
5257 * @reg_val: register value
5258 * @cmd_details: pointer to command details structure or NULL
5259 *
5260 * Use the firmware to write to an Rx control register,
5261 * especially useful if the Rx unit is under heavy pressure
5262 **/
5263 int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5264 u32 reg_addr, u32 reg_val,
5265 struct i40e_asq_cmd_details *cmd_details)
5266 {
5267 struct i40e_aq_desc desc;
5268 struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5269 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5270 int status;
5271
5272 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5273
5274 cmd->address = cpu_to_le32(reg_addr);
5275 cmd->value = cpu_to_le32(reg_val);
5276
5277 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5278
5279 return status;
5280 }
5281
5282 /**
5283 * i40e_write_rx_ctl - write to an Rx control register
5284 * @hw: pointer to the hw struct
5285 * @reg_addr: register address
5286 * @reg_val: register value
5287 **/
5288 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5289 {
5290 bool use_register;
5291 int status = 0;
5292 int retry = 5;
5293
5294 use_register = (((hw->aq.api_maj_ver == 1) &&
5295 (hw->aq.api_min_ver < 5)) ||
5296 (hw->mac.type == I40E_MAC_X722));
5297 if (!use_register) {
5298 do_retry:
5299 status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5300 reg_val, NULL);
5301 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5302 usleep_range(1000, 2000);
5303 retry--;
5304 goto do_retry;
5305 }
5306 }
5307
5308 /* if the AQ access failed, try the old-fashioned way */
5309 if (status || use_register)
5310 wr32(hw, reg_addr, reg_val);
5311 }
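
/*
 * Usage sketch only (not part of the upstream driver): Rx control
 * registers should be accessed through the wrappers above so that the
 * firmware-assisted path is used where required.  This hypothetical
 * helper performs a read-modify-write of one such register.
 */
static __maybe_unused void
i40e_example_rx_ctl_set_bits(struct i40e_hw *hw, u32 reg_addr, u32 set_bits)
{
	u32 val = i40e_read_rx_ctl(hw, reg_addr);

	i40e_write_rx_ctl(hw, reg_addr, val | set_bits);
}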
5312
5313 /**
5314 * i40e_mdio_if_number_selection - MDIO I/F number selection
5315 * @hw: pointer to the hw struct
5316 * @set_mdio: use MDIO I/F number specified by mdio_num
5317 * @mdio_num: MDIO I/F number
5318 * @cmd: pointer to PHY Register command structure
5319 **/
5320 static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
5321 u8 mdio_num,
5322 struct i40e_aqc_phy_register_access *cmd)
5323 {
5324 if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
5325 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
5326 cmd->cmd_flags |=
5327 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
5328 ((mdio_num <<
5329 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
5330 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
5331 else
5332 i40e_debug(hw, I40E_DEBUG_PHY,
5333 "MDIO I/F number selection not supported by current FW version.\n");
5334 }
5335 }
5336
5337 /**
5338 * i40e_aq_set_phy_register_ext
5339 * @hw: pointer to the hw struct
5340 * @phy_select: select which phy should be accessed
5341 * @dev_addr: PHY device address
5342 * @page_change: flag to indicate if phy page should be updated
5343 * @set_mdio: use MDIO I/F number specified by mdio_num
5344 * @mdio_num: MDIO I/F number
5345 * @reg_addr: PHY register address
5346 * @reg_val: new register value
5347 * @cmd_details: pointer to command details structure or NULL
5348 *
5349 * Write the external PHY register.
5350 * NOTE: In common cases the MDIO I/F number should not be changed; that is why
5351 * you may use the simple wrapper i40e_aq_set_phy_register.
5352 **/
5353 int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
5354 u8 phy_select, u8 dev_addr, bool page_change,
5355 bool set_mdio, u8 mdio_num,
5356 u32 reg_addr, u32 reg_val,
5357 struct i40e_asq_cmd_details *cmd_details)
5358 {
5359 struct i40e_aq_desc desc;
5360 struct i40e_aqc_phy_register_access *cmd =
5361 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5362 int status;
5363
5364 i40e_fill_default_direct_cmd_desc(&desc,
5365 i40e_aqc_opc_set_phy_register);
5366
5367 cmd->phy_interface = phy_select;
5368 cmd->dev_address = dev_addr;
5369 cmd->reg_address = cpu_to_le32(reg_addr);
5370 cmd->reg_value = cpu_to_le32(reg_val);
5371
5372 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5373
5374 if (!page_change)
5375 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5376
5377 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5378
5379 return status;
5380 }
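
/*
 * Usage sketch only (not part of the upstream driver): write an external
 * PHY register through the admin queue without overriding the MDIO I/F
 * number (set_mdio == false), which is the common case covered by the
 * i40e_aq_set_phy_register() convenience wrapper mentioned in the NOTE
 * above.  The helper name is hypothetical.
 */
static __maybe_unused int
i40e_example_aq_phy_write(struct i40e_hw *hw, u8 dev_addr, u32 reg_addr,
			  u32 reg_val)
{
	return i40e_aq_set_phy_register_ext(hw,
					    I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
					    dev_addr, true,
					    false, 0, /* keep current MDIO I/F */
					    reg_addr, reg_val, NULL);
}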
5381
5382 /**
5383 * i40e_aq_get_phy_register_ext
5384 * @hw: pointer to the hw struct
5385 * @phy_select: select which phy should be accessed
5386 * @dev_addr: PHY device address
5387 * @page_change: flag to indicate if phy page should be updated
5388 * @set_mdio: use MDIO I/F number specified by mdio_num
5389 * @mdio_num: MDIO I/F number
5390 * @reg_addr: PHY register address
5391 * @reg_val: read register value
5392 * @cmd_details: pointer to command details structure or NULL
5393 *
5394 * Read the external PHY register.
5395 * NOTE: In common cases the MDIO I/F number should not be changed; that is why
5396 * you may use the simple wrapper i40e_aq_get_phy_register.
5397 **/
5398 int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
5399 u8 phy_select, u8 dev_addr, bool page_change,
5400 bool set_mdio, u8 mdio_num,
5401 u32 reg_addr, u32 *reg_val,
5402 struct i40e_asq_cmd_details *cmd_details)
5403 {
5404 struct i40e_aq_desc desc;
5405 struct i40e_aqc_phy_register_access *cmd =
5406 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5407 int status;
5408
5409 i40e_fill_default_direct_cmd_desc(&desc,
5410 i40e_aqc_opc_get_phy_register);
5411
5412 cmd->phy_interface = phy_select;
5413 cmd->dev_address = dev_addr;
5414 cmd->reg_address = cpu_to_le32(reg_addr);
5415
5416 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5417
5418 if (!page_change)
5419 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5420
5421 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5422 if (!status)
5423 *reg_val = le32_to_cpu(cmd->reg_value);
5424
5425 return status;
5426 }
5427
5428 /**
5429 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5430 * @hw: pointer to the hw struct
5431 * @buff: command buffer (size in bytes = buff_size)
5432 * @buff_size: buffer size in bytes
5433 * @track_id: package tracking id
5434 * @error_offset: returns error offset
5435 * @error_info: returns error information
5436 * @cmd_details: pointer to command details structure or NULL
5437 **/
5438 int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5439 u16 buff_size, u32 track_id,
5440 u32 *error_offset, u32 *error_info,
5441 struct i40e_asq_cmd_details *cmd_details)
5442 {
5443 struct i40e_aq_desc desc;
5444 struct i40e_aqc_write_personalization_profile *cmd =
5445 (struct i40e_aqc_write_personalization_profile *)
5446 &desc.params.raw;
5447 struct i40e_aqc_write_ddp_resp *resp;
5448 int status;
5449
5450 i40e_fill_default_direct_cmd_desc(&desc,
5451 i40e_aqc_opc_write_personalization_profile);
5452
5453 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5454 if (buff_size > I40E_AQ_LARGE_BUF)
5455 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5456
5457 desc.datalen = cpu_to_le16(buff_size);
5458
5459 cmd->profile_track_id = cpu_to_le32(track_id);
5460
5461 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5462 if (!status) {
5463 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5464 if (error_offset)
5465 *error_offset = le32_to_cpu(resp->error_offset);
5466 if (error_info)
5467 *error_info = le32_to_cpu(resp->error_info);
5468 }
5469
5470 return status;
5471 }
5472
5473 /**
5474 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5475 * @hw: pointer to the hw struct
5476 * @buff: command buffer (size in bytes = buff_size)
5477 * @buff_size: buffer size in bytes
5478 * @flags: AdminQ command flags
5479 * @cmd_details: pointer to command details structure or NULL
5480 **/
5481 int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5482 u16 buff_size, u8 flags,
5483 struct i40e_asq_cmd_details *cmd_details)
5484 {
5485 struct i40e_aq_desc desc;
5486 struct i40e_aqc_get_applied_profiles *cmd =
5487 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5488 int status;
5489
5490 i40e_fill_default_direct_cmd_desc(&desc,
5491 i40e_aqc_opc_get_personalization_profile_list);
5492
5493 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5494 if (buff_size > I40E_AQ_LARGE_BUF)
5495 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5496 desc.datalen = cpu_to_le16(buff_size);
5497
5498 cmd->flags = flags;
5499
5500 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5501
5502 return status;
5503 }
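
/*
 * Usage sketch only (not part of the upstream driver): query the list of
 * currently applied DDP profiles into a caller-provided buffer.  The
 * layout of the returned buffer is left to the caller to interpret, and
 * the flags value of 0 is illustrative.
 */
static __maybe_unused int
i40e_example_get_applied_profiles(struct i40e_hw *hw, void *buff,
				  u16 buff_size)
{
	return i40e_aq_get_ddp_list(hw, buff, buff_size, 0, NULL);
}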
5504
5505 /**
5506 * i40e_find_segment_in_package
5507 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
5508 * @pkg_hdr: pointer to the package header to be searched
5509 *
5510 * This function searches a package file for a particular segment type. On
5511 * success it returns a pointer to the segment header, otherwise it will
5512 * return NULL.
5513 **/
5514 struct i40e_generic_seg_header *
5515 i40e_find_segment_in_package(u32 segment_type,
5516 struct i40e_package_header *pkg_hdr)
5517 {
5518 struct i40e_generic_seg_header *segment;
5519 u32 i;
5520
5521 /* Search all package segments for the requested segment type */
5522 for (i = 0; i < pkg_hdr->segment_count; i++) {
5523 segment =
5524 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5525 pkg_hdr->segment_offset[i]);
5526
5527 if (segment->type == segment_type)
5528 return segment;
5529 }
5530
5531 return NULL;
5532 }
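
/*
 * Usage sketch only (not part of the upstream driver): locate the i40e
 * profile segment inside a DDP package image before downloading it.  The
 * package header is assumed to sit at the start of the firmware image;
 * the helper name is hypothetical.
 */
static __maybe_unused struct i40e_profile_segment *
i40e_example_find_profile(struct i40e_package_header *pkg_hdr)
{
	struct i40e_generic_seg_header *seg;

	seg = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);

	return (struct i40e_profile_segment *)seg;
}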
5533
5534 /* Get section table in profile */
5535 #define I40E_SECTION_TABLE(profile, sec_tbl) \
5536 do { \
5537 struct i40e_profile_segment *p = (profile); \
5538 u32 count; \
5539 u32 *nvm; \
5540 count = p->device_table_count; \
5541 nvm = (u32 *)&p->device_table[count]; \
5542 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5543 } while (0)
5544
5545 /* Get section header in profile */
5546 #define I40E_SECTION_HEADER(profile, offset) \
5547 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5548
5549 /**
5550 * i40e_find_section_in_profile
5551 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
5552 * @profile: pointer to the i40e segment header to be searched
5553 *
5554 * This function searches the i40e segment for a particular section type. On
5555 * success it returns a pointer to the section header, otherwise it will
5556 * return NULL.
5557 **/
5558 struct i40e_profile_section_header *
5559 i40e_find_section_in_profile(u32 section_type,
5560 struct i40e_profile_segment *profile)
5561 {
5562 struct i40e_profile_section_header *sec;
5563 struct i40e_section_table *sec_tbl;
5564 u32 sec_off;
5565 u32 i;
5566
5567 if (profile->header.type != SEGMENT_TYPE_I40E)
5568 return NULL;
5569
5570 I40E_SECTION_TABLE(profile, sec_tbl);
5571
5572 for (i = 0; i < sec_tbl->section_count; i++) {
5573 sec_off = sec_tbl->section_offset[i];
5574 sec = I40E_SECTION_HEADER(profile, sec_off);
5575 if (sec->section.type == section_type)
5576 return sec;
5577 }
5578
5579 return NULL;
5580 }
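
/*
 * Usage sketch only (not part of the upstream driver): pull the profile
 * information section out of an i40e profile segment, e.g. to inspect the
 * profile name before downloading it.  The helper name is hypothetical;
 * SECTION_TYPE_INFO and the offset handling follow
 * i40e_add_pinfo_to_list() below.
 */
static __maybe_unused struct i40e_profile_info *
i40e_example_profile_info(struct i40e_profile_segment *profile)
{
	struct i40e_profile_section_header *sec;

	sec = i40e_find_section_in_profile(SECTION_TYPE_INFO, profile);
	if (!sec)
		return NULL;

	return (struct i40e_profile_info *)((u8 *)sec + sec->section.offset);
}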
5581
5582 /**
5583 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5584 * @hw: pointer to the hw struct
5585 * @aq: command buffer containing all data to execute AQ
5586 **/
5587 static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5588 struct i40e_profile_aq_section *aq)
5589 {
5590 struct i40e_aq_desc desc;
5591 u8 *msg = NULL;
5592 u16 msglen;
5593 int status;
5594
5595 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5596 desc.flags |= cpu_to_le16(aq->flags);
5597 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5598
5599 msglen = aq->datalen;
5600 if (msglen) {
5601 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5602 I40E_AQ_FLAG_RD));
5603 if (msglen > I40E_AQ_LARGE_BUF)
5604 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5605 desc.datalen = cpu_to_le16(msglen);
5606 msg = &aq->data[0];
5607 }
5608
5609 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5610
5611 if (status) {
5612 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5613 "unable to exec DDP AQ opcode %u, error %d\n",
5614 aq->opcode, status);
5615 return status;
5616 }
5617
5618 /* copy returned desc to aq_buf */
5619 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5620
5621 return 0;
5622 }
5623
5624 /**
5625 * i40e_validate_profile
5626 * @hw: pointer to the hardware structure
5627 * @profile: pointer to the profile segment of the package to be validated
5628 * @track_id: package tracking id
5629 * @rollback: flag if the profile is for rollback.
5630 *
5631 * Validates supported devices and profile's sections.
5632 */
5633 static int
5634 i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5635 u32 track_id, bool rollback)
5636 {
5637 struct i40e_profile_section_header *sec = NULL;
5638 struct i40e_section_table *sec_tbl;
5639 u32 vendor_dev_id;
5640 int status = 0;
5641 u32 dev_cnt;
5642 u32 sec_off;
5643 u32 i;
5644
5645 if (track_id == I40E_DDP_TRACKID_INVALID) {
5646 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5647 return -EOPNOTSUPP;
5648 }
5649
5650 dev_cnt = profile->device_table_count;
5651 for (i = 0; i < dev_cnt; i++) {
5652 vendor_dev_id = profile->device_table[i].vendor_dev_id;
5653 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5654 hw->device_id == (vendor_dev_id & 0xFFFF))
5655 break;
5656 }
5657 if (dev_cnt && i == dev_cnt) {
5658 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5659 "Device doesn't support DDP\n");
5660 return -ENODEV;
5661 }
5662
5663 I40E_SECTION_TABLE(profile, sec_tbl);
5664
5665 /* Validate sections types */
5666 for (i = 0; i < sec_tbl->section_count; i++) {
5667 sec_off = sec_tbl->section_offset[i];
5668 sec = I40E_SECTION_HEADER(profile, sec_off);
5669 if (rollback) {
5670 if (sec->section.type == SECTION_TYPE_MMIO ||
5671 sec->section.type == SECTION_TYPE_AQ ||
5672 sec->section.type == SECTION_TYPE_RB_AQ) {
5673 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5674 "Not a roll-back package\n");
5675 return -EOPNOTSUPP;
5676 }
5677 } else {
5678 if (sec->section.type == SECTION_TYPE_RB_AQ ||
5679 sec->section.type == SECTION_TYPE_RB_MMIO) {
5680 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5681 "Not an original package\n");
5682 return -EOPNOTSUPP;
5683 }
5684 }
5685 }
5686
5687 return status;
5688 }
5689
5690 /**
5691 * i40e_write_profile
5692 * @hw: pointer to the hardware structure
5693 * @profile: pointer to the profile segment of the package to be downloaded
5694 * @track_id: package tracking id
5695 *
5696 * Handles the download of a complete package.
5697 */
5698 int
5699 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5700 u32 track_id)
5701 {
5702 struct i40e_profile_section_header *sec = NULL;
5703 struct i40e_profile_aq_section *ddp_aq;
5704 struct i40e_section_table *sec_tbl;
5705 u32 offset = 0, info = 0;
5706 u32 section_size = 0;
5707 int status = 0;
5708 u32 sec_off;
5709 u32 i;
5710
5711 status = i40e_validate_profile(hw, profile, track_id, false);
5712 if (status)
5713 return status;
5714
5715 I40E_SECTION_TABLE(profile, sec_tbl);
5716
5717 for (i = 0; i < sec_tbl->section_count; i++) {
5718 sec_off = sec_tbl->section_offset[i];
5719 sec = I40E_SECTION_HEADER(profile, sec_off);
5720 /* Process generic admin command */
5721 if (sec->section.type == SECTION_TYPE_AQ) {
5722 ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5723 status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5724 if (status) {
5725 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5726 "Failed to execute aq: section %d, opcode %u\n",
5727 i, ddp_aq->opcode);
5728 break;
5729 }
5730 sec->section.type = SECTION_TYPE_RB_AQ;
5731 }
5732
5733 /* Skip any non-mmio sections */
5734 if (sec->section.type != SECTION_TYPE_MMIO)
5735 continue;
5736
5737 section_size = sec->section.size +
5738 sizeof(struct i40e_profile_section_header);
5739
5740 /* Write MMIO section */
5741 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5742 track_id, &offset, &info, NULL);
5743 if (status) {
5744 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5745 "Failed to write profile: section %d, offset %d, info %d\n",
5746 i, offset, info);
5747 break;
5748 }
5749 }
5750 return status;
5751 }
5752
5753 /**
5754 * i40e_rollback_profile
5755 * @hw: pointer to the hardware structure
5756 * @profile: pointer to the profile segment of the package to be removed
5757 * @track_id: package tracking id
5758 *
5759 * Rolls back previously loaded package.
5760 */
5761 int
5762 i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5763 u32 track_id)
5764 {
5765 struct i40e_profile_section_header *sec = NULL;
5766 struct i40e_section_table *sec_tbl;
5767 u32 offset = 0, info = 0;
5768 u32 section_size = 0;
5769 int status = 0;
5770 u32 sec_off;
5771 int i;
5772
5773 status = i40e_validate_profile(hw, profile, track_id, true);
5774 if (status)
5775 return status;
5776
5777 I40E_SECTION_TABLE(profile, sec_tbl);
5778
5779 /* For rollback write sections in reverse */
5780 for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5781 sec_off = sec_tbl->section_offset[i];
5782 sec = I40E_SECTION_HEADER(profile, sec_off);
5783
5784 /* Skip any non-rollback sections */
5785 if (sec->section.type != SECTION_TYPE_RB_MMIO)
5786 continue;
5787
5788 section_size = sec->section.size +
5789 sizeof(struct i40e_profile_section_header);
5790
5791 /* Write roll-back MMIO section */
5792 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5793 track_id, &offset, &info, NULL);
5794 if (status) {
5795 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5796 "Failed to write profile: section %d, offset %d, info %d\n",
5797 i, offset, info);
5798 break;
5799 }
5800 }
5801 return status;
5802 }
5803
5804 /**
5805 * i40e_add_pinfo_to_list
5806 * @hw: pointer to the hardware structure
5807 * @profile: pointer to the profile segment of the package
5808 * @profile_info_sec: buffer for information section
5809 * @track_id: package tracking id
5810 *
5811 * Register a profile to the list of loaded profiles.
5812 */
5813 int
5814 i40e_add_pinfo_to_list(struct i40e_hw *hw,
5815 struct i40e_profile_segment *profile,
5816 u8 *profile_info_sec, u32 track_id)
5817 {
5818 struct i40e_profile_section_header *sec = NULL;
5819 struct i40e_profile_info *pinfo;
5820 u32 offset = 0, info = 0;
5821 int status = 0;
5822
5823 sec = (struct i40e_profile_section_header *)profile_info_sec;
5824 sec->tbl_size = 1;
5825 sec->data_end = sizeof(struct i40e_profile_section_header) +
5826 sizeof(struct i40e_profile_info);
5827 sec->section.type = SECTION_TYPE_INFO;
5828 sec->section.offset = sizeof(struct i40e_profile_section_header);
5829 sec->section.size = sizeof(struct i40e_profile_info);
5830 pinfo = (struct i40e_profile_info *)(profile_info_sec +
5831 sec->section.offset);
5832 pinfo->track_id = track_id;
5833 pinfo->version = profile->version;
5834 pinfo->op = I40E_DDP_ADD_TRACKID;
5835 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5836
5837 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5838 track_id, &offset, &info, NULL);
5839
5840 return status;
5841 }
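
/*
 * Usage sketch only (not part of the upstream driver): a minimal DDP
 * download flow built from the helpers above - find the i40e segment in
 * the package, write it, then register the profile so it appears in the
 * applied-profile list.  Error handling is reduced to the essentials and
 * the helper name is hypothetical.
 */
static __maybe_unused int
i40e_example_load_ddp(struct i40e_hw *hw, struct i40e_package_header *pkg,
		      u32 track_id)
{
	u8 section_buf[sizeof(struct i40e_profile_section_header) +
		       sizeof(struct i40e_profile_info)];
	struct i40e_profile_segment *profile;
	int status;

	memset(section_buf, 0, sizeof(section_buf));

	profile = (struct i40e_profile_segment *)
		  i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
	if (!profile)
		return -EINVAL;

	status = i40e_write_profile(hw, profile, track_id);
	if (status)
		return status;

	return i40e_add_pinfo_to_list(hw, profile, section_buf, track_id);
}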
5842
5843 /**
5844 * i40e_aq_add_cloud_filters
5845 * @hw: pointer to the hardware structure
5846 * @seid: VSI seid to add cloud filters to
5847 * @filters: Buffer which contains the filters to be added
5848 * @filter_count: number of filters contained in the buffer
5849 *
5850 * Set the cloud filters for a given VSI. The contents of the
5851 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5852 * of the function.
5853 *
5854 **/
5855 int
5856 i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5857 struct i40e_aqc_cloud_filters_element_data *filters,
5858 u8 filter_count)
5859 {
5860 struct i40e_aq_desc desc;
5861 struct i40e_aqc_add_remove_cloud_filters *cmd =
5862 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5863 u16 buff_len;
5864 int status;
5865
5866 i40e_fill_default_direct_cmd_desc(&desc,
5867 i40e_aqc_opc_add_cloud_filters);
5868
5869 buff_len = filter_count * sizeof(*filters);
5870 desc.datalen = cpu_to_le16(buff_len);
5871 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5872 cmd->num_filters = filter_count;
5873 cmd->seid = cpu_to_le16(seid);
5874
5875 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5876
5877 return status;
5878 }
5879
5880 /**
5881 * i40e_aq_add_cloud_filters_bb
5882 * @hw: pointer to the hardware structure
5883 * @seid: VSI seid to add cloud filters to
5884 * @filters: Buffer which contains the filters in big buffer to be added
5885 * @filter_count: number of filters contained in the buffer
5886 *
5887 * Set the big buffer cloud filters for a given VSI. The contents of the
5888 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5889 * function.
5890 *
5891 **/
5892 int
5893 i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5894 struct i40e_aqc_cloud_filters_element_bb *filters,
5895 u8 filter_count)
5896 {
5897 struct i40e_aq_desc desc;
5898 struct i40e_aqc_add_remove_cloud_filters *cmd =
5899 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5900 u16 buff_len;
5901 int status;
5902 int i;
5903
5904 i40e_fill_default_direct_cmd_desc(&desc,
5905 i40e_aqc_opc_add_cloud_filters);
5906
5907 buff_len = filter_count * sizeof(*filters);
5908 desc.datalen = cpu_to_le16(buff_len);
5909 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5910 cmd->num_filters = filter_count;
5911 cmd->seid = cpu_to_le16(seid);
5912 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5913
5914 for (i = 0; i < filter_count; i++) {
5915 u16 tnl_type;
5916 u32 ti;
5917
5918 tnl_type = (le16_to_cpu(filters[i].element.flags) &
5919 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5920 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5921
5922 /* Due to hardware eccentricities, the VNI for Geneve is shifted
5923 * one byte further than the Tenant ID position used by the
5924 * other tunnel types.
5925 */
5926 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5927 ti = le32_to_cpu(filters[i].element.tenant_id);
5928 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5929 }
5930 }
5931
5932 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5933
5934 return status;
5935 }
5936
5937 /**
5938 * i40e_aq_rem_cloud_filters
5939 * @hw: pointer to the hardware structure
5940 * @seid: VSI seid to remove cloud filters from
5941 * @filters: Buffer which contains the filters to be removed
5942 * @filter_count: number of filters contained in the buffer
5943 *
5944 * Remove the cloud filters for a given VSI. The contents of the
5945 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5946 * of the function.
5947 *
5948 **/
5949 int
5950 i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
5951 struct i40e_aqc_cloud_filters_element_data *filters,
5952 u8 filter_count)
5953 {
5954 struct i40e_aq_desc desc;
5955 struct i40e_aqc_add_remove_cloud_filters *cmd =
5956 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5957 u16 buff_len;
5958 int status;
5959
5960 i40e_fill_default_direct_cmd_desc(&desc,
5961 i40e_aqc_opc_remove_cloud_filters);
5962
5963 buff_len = filter_count * sizeof(*filters);
5964 desc.datalen = cpu_to_le16(buff_len);
5965 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5966 cmd->num_filters = filter_count;
5967 cmd->seid = cpu_to_le16(seid);
5968
5969 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5970
5971 return status;
5972 }
5973
5974 /**
5975 * i40e_aq_rem_cloud_filters_bb
5976 * @hw: pointer to the hardware structure
5977 * @seid: VSI seid to remove cloud filters from
5978 * @filters: Buffer which contains the filters in big buffer to be removed
5979 * @filter_count: number of filters contained in the buffer
5980 *
5981 * Remove the big buffer cloud filters for a given VSI. The contents of the
5982 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5983 * function.
5984 *
5985 **/
5986 int
5987 i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5988 struct i40e_aqc_cloud_filters_element_bb *filters,
5989 u8 filter_count)
5990 {
5991 struct i40e_aq_desc desc;
5992 struct i40e_aqc_add_remove_cloud_filters *cmd =
5993 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5994 u16 buff_len;
5995 int status;
5996 int i;
5997
5998 i40e_fill_default_direct_cmd_desc(&desc,
5999 i40e_aqc_opc_remove_cloud_filters);
6000
6001 buff_len = filter_count * sizeof(*filters);
6002 desc.datalen = cpu_to_le16(buff_len);
6003 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6004 cmd->num_filters = filter_count;
6005 cmd->seid = cpu_to_le16(seid);
6006 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
6007
6008 for (i = 0; i < filter_count; i++) {
6009 u16 tnl_type;
6010 u32 ti;
6011
6012 tnl_type = (le16_to_cpu(filters[i].element.flags) &
6013 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6014 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6015
6016 /* Due to hardware eccentricities, the VNI for Geneve is shifted
6017 * one byte further than the Tenant ID position used by the
6018 * other tunnel types.
6019 */
6020 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6021 ti = le32_to_cpu(filters[i].element.tenant_id);
6022 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
6023 }
6024 }
6025
6026 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6027
6028 return status;
6029 }
6030