// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_link_speed2mbps
 * converts i40e_aq_link_speed to integer value of Mbps
 * @link_speed: the speed to convert
 *
 * return the speed as direct value of Mbps.
 **/
static u32
i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:
		return SPEED_100;
	case I40E_LINK_SPEED_1GB:
		return SPEED_1000;
	case I40E_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case I40E_LINK_SPEED_5GB:
		return SPEED_5000;
	case I40E_LINK_SPEED_10GB:
		return SPEED_10000;
	case I40E_LINK_SPEED_20GB:
		return SPEED_20000;
	case I40E_LINK_SPEED_25GB:
		return SPEED_25000;
	case I40E_LINK_SPEED_40GB:
		return SPEED_40000;
	case I40E_LINK_SPEED_UNKNOWN:
		return SPEED_UNKNOWN;
	}
	return SPEED_UNKNOWN;
}

/**
 * i40e_set_vf_link_state
 * @vf: pointer to the VF structure
 * @pfe: pointer to PF event structure
 * @ls: pointer to link status structure
 *
 * set a link state on a single vf
 **/
static void i40e_set_vf_link_state(struct i40e_vf *vf,
				   struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
{
	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;

	if (vf->link_forced)
		link_status = vf->link_up;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_speed = link_status ?
			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
		pfe->event_data.link_event_adv.link_status = link_status;
	} else {
		pfe->event_data.link_event.link_speed = link_status ?
			i40e_virtchnl_link_speed(ls->link_speed) : 0;
		pfe->event_data.link_event.link_status = link_status;
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	i40e_set_vf_link_state(vf, &pfe, ls);

	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_reset_vf
 * @vf: pointer to the VF info
 * @notify_vf: notify vf about reset or not
 * Reset VF handler.
 **/
static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	if (notify_vf)
		i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function is called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		/* If the PF is in the VFs-releasing state, resetting a VF
		 * is impossible, so bail out.
		 */
		if (test_bit(__I40E_VFS_RELEASING, pf->state))
			return;
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	if (notify_vf)
		dev_warn(&vf->pf->pdev->dev,
			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
			 vf->vf_id);
	else
		dev_dbg(&vf->pf->pdev->dev,
			"Failed to initiate reset for VF %d after 200 milliseconds\n",
			vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
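
/* Illustrative sketch of the two mapping modes above (values hypothetical):
 * with a contiguous mapping the VSI stores only its first PF queue, so
 * VSI-relative queue 3 on a VSI whose queue_mapping[0] is 64 resolves to
 * PF queue 64 + 3 = 67; with a noncontiguous mapping each slot is looked
 * up directly:
 *
 *	contiguous:    pf_queue_id = queue_mapping[0] + vsi_queue_id
 *	noncontiguous: pf_queue_id = queue_mapping[vsi_queue_id]
 */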

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (1 to 16) as its
		 * own, they may actually belong to different VSIs (up to 4).
		 * We need to find which queue belongs to which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}
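
/* Worked example for the ADq walk above (hypothetical channel sizes): with
 * two traffic classes of 4 queue pairs each, VF-relative queue 5 skips TC0
 * (5 >= 4, so queue_id becomes 5 - 4 = 1) and resolves to queue 1 of the
 * TC1 channel VSI before the usual VSI-to-PF queue lookup is applied.
 */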

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				     vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* If the VF is running in polling mode and is using interrupt zero,
	 * we need to disable auto-masking when enabling interrupt zero for
	 * VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}
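
/* Sketch of the linklistmap encoding built above (assuming
 * I40E_VIRTCHNL_SUPPORTED_QTYPES == 2, as the RX/TX pairing implies):
 * each VSI queue pair n owns two adjacent bits, so walking the set bits in
 * order yields the interleaved RX/TX chain that is written to the LNKLST
 * head and the per-queue RQCTL/TQCTL "next queue" fields.
 *
 *	bit 2n     -> RX queue n	(from vecmap->rxq_map)
 *	bit 2n + 1 -> TX queue n	(from vecmap->txq_map)
 */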

/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
					      qvlist_info->num_vectors - 1),
				  GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}
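
/* Illustrative view of the list splice performed above: if vector v's
 * LNKLSTN head previously pointed at queue Q, programming the CEQ turns
 * the chain into
 *
 *	LNKLSTN(v) -> CEQ(ceq_idx) -> Q -> ...
 *
 * i.e. the new CEQ is pushed on top and inherits the old head as its
 * "next queue" via the CEQCTL NEXTQ fields.
 */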

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	int ret = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* if port VLAN is configured increase the max packet size */
	if (vsi->info.pvid)
		rx_ctx.rxmax += VLAN_HLEN;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
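
/* Worked sizing example for the checks above (hypothetical request): a
 * header buffer of 256 bytes passes the 2KB - 64 limit and is stored
 * right-shifted by I40E_RXQ_CTX_HBUFF_SHIFT, since the context field
 * appears to count in hardware granules rather than bytes; likewise a
 * 2048-byte data buffer passes the 16KB - 128 limit and is shifted by
 * I40E_RXQ_CTX_DBUFF_SHIFT before being written to rx_ctx.dbuff.
 */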

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; don't apply the MAC filter */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate)
		max_tx_rate = vf->tx_rate;
	else if (vf->ch[idx].max_tx_rate)
		max_tx_rate = vf->ch[idx].max_tx_rate;

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues
 * to the VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}
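
/* Example of the VSILAN_QTABLE packing above (hypothetical PF queues):
 * each 32-bit register holds two PF queue ids, the even VSI queue in the
 * low 16 bits and the odd one in the high 16 bits, so a VSI whose first
 * two queues map to PF queues 64 and 65 gets reg = (65 << 16) | 64 in
 * slot j = 0; unused slots are written as 0x07FF07FF (end-of-list in
 * both halves).
 */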

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, and completes the
 * VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again; only clear its values in the
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs.  If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}
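
/* Accounting sketch for the bookkeeping above (assuming a default of 4
 * queue pairs per VF): a VF that ends up with 16 queue pairs only debits
 * the extra 16 - 4 = 12 from pf->queues_left here, and i40e_free_vf_res()
 * credits the same 12 back when the VF is torn down.
 */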

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}
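
/* The poll above uses the CIAA/CIAD indirect access pair: the PF writes
 * the target (the VF number plus the 0xAA device-status offset) to
 * I40E_PF_PCI_CIAA, then repeatedly reads the returned value from
 * I40E_PF_PCI_CIAD until the transactions-pending bit (mask 0x20) drops,
 * giving up after roughly 100 microseconds.
 */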

/**
 * __i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	u16 num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
 **/
static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	int num_vlans;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return num_vlans;
}

/**
 * i40e_get_vlan_list_sync
 * @vsi: pointer to the VSI
 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
 *             This array is allocated here, but has to be freed in caller.
 *
 * Called to get number of VLANs and VLAN list present in mac_filter_hash.
 **/
static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
				    s16 **vlan_list)
{
	struct i40e_mac_filter *f;
	int i = 0;
	int bkt;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
	if (!(*vlan_list))
		goto err;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
			continue;
		(*vlan_list)[i++] = f->vlan;
	}
err:
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
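
/* Illustrative caller pattern (mirrors the use in
 * i40e_config_vf_promiscuous_mode() below); the list must be freed by the
 * caller, and a NULL list means the allocation failed:
 *
 *	u16 num_vlans;
 *	s16 *vl;
 *
 *	i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
 *	if (!vl)
 *		return I40E_ERR_NO_MEMORY;
 *	// ... apply per-VLAN settings over vl[0..num_vlans-1] ...
 *	kfree(vl);
 */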

/**
 * i40e_set_vsi_promisc
 * @vf: pointer to the VF struct
 * @seid: VSI number
 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
 *                for a given VLAN
 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
 *                  for a given VLAN
 * @vl: List of VLANs - apply filter for given VLANs
 * @num_vlans: Number of elements in @vl
 **/
static i40e_status
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
		     bool unicast_enable, s16 *vl, u16 num_vlans)
{
	i40e_status aq_ret, aq_tmp = 0;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* No VLAN to set promisc on, set on VSI */
	if (!num_vlans || !vl) {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
							       multi_enable,
							       NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
							     unicast_enable,
							     NULL, true);

		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}

		return aq_ret;
	}

	for (i = 0; i < num_vlans; i++) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
							    multi_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
							    unicast_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}
	}

	if (aq_tmp)
		aq_ret = aq_tmp;

	return aq_ret;
}

/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF vsis and from the VF reset path to reset promiscuous mode.
 **/
static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
						   u16 vsi_id,
						   bool allmulti,
						   bool alluni)
{
	i40e_status aq_ret = I40E_SUCCESS;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u16 num_vlans;
	s16 *vl;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return I40E_ERR_PARAM;

	if (vf->port_vlan_id) {
		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
					      alluni, &vf->port_vlan_id, 1);
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);

		if (!vl)
			return I40E_ERR_NO_MEMORY;

		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
					      vl, num_vlans);
		kfree(vl);
		return aq_ret;
	}

	/* no VLANs to set on, set on VSI */
	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
				      NULL, 0);
	return aq_ret;
}

/**
 * i40e_sync_vfr_reset
 * @hw: pointer to hw struct
 * @vf_id: VF identifier
 *
 * Before triggering a hardware reset, we need to know that no other process
 * has reserved the hardware for any reset operations. This check is done by
 * examining the status of the RSTAT1 register used to signal the reset.
 **/
static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
{
	u32 reg;
	int i;

	for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
		reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
			   I40E_VFINT_ICR0_ADMINQ_MASK;
		if (reg)
			return 0;

		usleep_range(100, 200);
	}

	return -EAGAIN;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	bool vf_active;
	u32 radq;

	/* warn the VF */
	vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* Sync the VFR reset before triggering the next one */
		radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
			    I40E_VFINT_ICR0_ADMINQ_MASK;
		if (vf_active && !radq)
			/* wait for the VF driver to finish its reset */
			if (i40e_sync_vfr_reset(hw, vf->vf_id))
				dev_info(&pf->pdev->dev,
					 "Reset VF %d never finished\n",
					 vf->vf_id);

		/* Reset the VF using the VPGEN_VFRTRIG reg. This also sets
		 * the reset-in-progress state in the rstat1 register.
		 */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}
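
/* Worked example for the VFLRSTAT indexing above (hypothetical ids): the
 * GLGEN_VFLRSTAT registers hold one bit per absolute VF, 32 per register,
 * so with vf_base_id 64 and vf_id 5 the absolute id 69 lands in register
 * 69 / 32 = 2 at bit 69 % 32 = 5.
 */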

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is in reset, resets successfully, or resets
 * are disabled and false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
		return true;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return true;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* HW may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
1687 
1688 /**
1689  * i40e_free_vfs
1690  * @pf: pointer to the PF structure
1691  *
1692  * free VF resources
1693  **/
i40e_free_vfs(struct i40e_pf * pf)1694 void i40e_free_vfs(struct i40e_pf *pf)
1695 {
1696 	struct i40e_hw *hw = &pf->hw;
1697 	u32 reg_idx, bit_idx;
1698 	int i, tmp, vf_id;
1699 
1700 	if (!pf->vf)
1701 		return;
1702 
1703 	set_bit(__I40E_VFS_RELEASING, pf->state);
1704 	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1705 		usleep_range(1000, 2000);
1706 
1707 	i40e_notify_client_of_vf_enable(pf, 0);
1708 
1709 	/* Disable IOV before freeing resources. This lets any VF drivers
1710 	 * running in the host get themselves cleaned up before we yank
1711 	 * the carpet out from underneath their feet.
1712 	 */
1713 	if (!pci_vfs_assigned(pf->pdev))
1714 		pci_disable_sriov(pf->pdev);
1715 	else
1716 		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1717 
1718 	/* Amortize wait time by stopping all VFs at the same time */
1719 	for (i = 0; i < pf->num_alloc_vfs; i++) {
1720 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1721 			continue;
1722 
1723 		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1724 	}
1725 
1726 	for (i = 0; i < pf->num_alloc_vfs; i++) {
1727 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1728 			continue;
1729 
1730 		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1731 	}
1732 
1733 	/* free up VF resources */
1734 	tmp = pf->num_alloc_vfs;
1735 	pf->num_alloc_vfs = 0;
1736 	for (i = 0; i < tmp; i++) {
1737 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1738 			i40e_free_vf_res(&pf->vf[i]);
1739 		/* disable qp mappings */
1740 		i40e_disable_vf_mappings(&pf->vf[i]);
1741 	}
1742 
1743 	kfree(pf->vf);
1744 	pf->vf = NULL;
1745 
1746 	/* This check is for when the driver is unloaded while VFs are
1747 	 * assigned. Setting the number of VFs to 0 through sysfs is caught
1748 	 * before this function ever gets called.
1749 	 */
1750 	if (!pci_vfs_assigned(pf->pdev)) {
1751 		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1752 		 * work correctly when SR-IOV gets re-enabled.
1753 		 */
1754 		for (vf_id = 0; vf_id < tmp; vf_id++) {
1755 			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1756 			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1757 			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1758 		}
1759 	}
1760 	clear_bit(__I40E_VF_DISABLE, pf->state);
1761 	clear_bit(__I40E_VFS_RELEASING, pf->state);
1762 }
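
/* The VFLR acknowledgment above relies on each VF's "reset occurred" flag
 * living in an array of 32-bit GLGEN_VFLRSTAT registers, so the absolute VF
 * id is split into a register index (/ 32) and a bit index (% 32). The same
 * arithmetic in isolation; vflr_slot_for() is a made-up helper, not a driver
 * function, and the block is excluded from the build.
 */
#if 0
#include <stdint.h>

struct vflr_slot {
	uint32_t reg;	/* which 32-bit VFLRSTAT register */
	uint32_t bit;	/* which bit inside that register */
};

static struct vflr_slot vflr_slot_for(uint32_t vf_base_id, uint32_t vf_id)
{
	uint32_t abs_id = vf_base_id + vf_id;
	struct vflr_slot s = { .reg = abs_id / 32, .bit = abs_id % 32 };

	return s;	/* e.g. base 64, VF 5 -> register 2, bit 5 */
}
#endif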
1763 
1764 #ifdef CONFIG_PCI_IOV
1765 /**
1766  * i40e_alloc_vfs
1767  * @pf: pointer to the PF structure
1768  * @num_alloc_vfs: number of VFs to allocate
1769  *
1770  * allocate VF resources
1771  **/
1772 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1773 {
1774 	struct i40e_vf *vfs;
1775 	int i, ret = 0;
1776 
1777 	/* Disable interrupt 0 so we don't try to handle the VFLR. */
1778 	i40e_irq_dynamic_disable_icr0(pf);
1779 
1780 	/* Check to see if we're just allocating resources for extant VFs */
1781 	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1782 		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1783 		if (ret) {
1784 			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1785 			pf->num_alloc_vfs = 0;
1786 			goto err_iov;
1787 		}
1788 	}
1789 	/* allocate memory */
1790 	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1791 	if (!vfs) {
1792 		ret = -ENOMEM;
1793 		goto err_alloc;
1794 	}
1795 	pf->vf = vfs;
1796 
1797 	/* apply default profile */
1798 	for (i = 0; i < num_alloc_vfs; i++) {
1799 		vfs[i].pf = pf;
1800 		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1801 		vfs[i].vf_id = i;
1802 
1803 		/* assign default capabilities */
1804 		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1805 		vfs[i].spoofchk = true;
1806 
1807 		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1808 
1809 	}
1810 	pf->num_alloc_vfs = num_alloc_vfs;
1811 
1812 	/* VF resources get allocated during reset */
1813 	i40e_reset_all_vfs(pf, false);
1814 
1815 	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1816 
1817 err_alloc:
1818 	if (ret)
1819 		i40e_free_vfs(pf);
1820 err_iov:
1821 	/* Re-enable interrupt 0. */
1822 	i40e_irq_dynamic_enable_icr0(pf);
1823 	return ret;
1824 }
1825 
1826 #endif
1827 /**
1828  * i40e_pci_sriov_enable
1829  * @pdev: pointer to a pci_dev structure
1830  * @num_vfs: number of VFs to allocate
1831  *
1832  * Enable or change the number of VFs
1833  **/
1834 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1835 {
1836 #ifdef CONFIG_PCI_IOV
1837 	struct i40e_pf *pf = pci_get_drvdata(pdev);
1838 	int pre_existing_vfs = pci_num_vf(pdev);
1839 	int err = 0;
1840 
1841 	if (test_bit(__I40E_TESTING, pf->state)) {
1842 		dev_warn(&pdev->dev,
1843 			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1844 		err = -EPERM;
1845 		goto err_out;
1846 	}
1847 
1848 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1849 		i40e_free_vfs(pf);
1850 	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1851 		goto out;
1852 
1853 	if (num_vfs > pf->num_req_vfs) {
1854 		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1855 			 num_vfs, pf->num_req_vfs);
1856 		err = -EPERM;
1857 		goto err_out;
1858 	}
1859 
1860 	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1861 	err = i40e_alloc_vfs(pf, num_vfs);
1862 	if (err) {
1863 		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1864 		goto err_out;
1865 	}
1866 
1867 out:
1868 	return num_vfs;
1869 
1870 err_out:
1871 	return err;
1872 #endif
1873 	return 0;
1874 }
1875 
1876 /**
1877  * i40e_pci_sriov_configure
1878  * @pdev: pointer to a pci_dev structure
1879  * @num_vfs: number of VFs to allocate
1880  *
1881  * Enable or change the number of VFs. Called when the user updates the number
1882  * of VFs in sysfs.
1883  **/
1884 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1885 {
1886 	struct i40e_pf *pf = pci_get_drvdata(pdev);
1887 	int ret = 0;
1888 
1889 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1890 		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1891 		return -EAGAIN;
1892 	}
1893 
1894 	if (num_vfs) {
1895 		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1896 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1897 			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1898 		}
1899 		ret = i40e_pci_sriov_enable(pdev, num_vfs);
1900 		goto sriov_configure_out;
1901 	}
1902 
1903 	if (!pci_vfs_assigned(pf->pdev)) {
1904 		i40e_free_vfs(pf);
1905 		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1906 		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1907 	} else {
1908 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1909 		ret = -EINVAL;
1910 		goto sriov_configure_out;
1911 	}
1912 sriov_configure_out:
1913 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1914 	return ret;
1915 }
1916 
1917 /***********************virtual channel routines******************/
1918 
1919 /**
1920  * i40e_vc_send_msg_to_vf
1921  * @vf: pointer to the VF info
1922  * @v_opcode: virtual channel opcode
1923  * @v_retval: virtual channel return value
1924  * @msg: pointer to the msg buffer
1925  * @msglen: msg length
1926  *
1927  * send msg to VF
1928  **/
1929 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1930 				  u32 v_retval, u8 *msg, u16 msglen)
1931 {
1932 	struct i40e_pf *pf;
1933 	struct i40e_hw *hw;
1934 	int abs_vf_id;
1935 	i40e_status aq_ret;
1936 
1937 	/* validate the request */
1938 	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1939 		return -EINVAL;
1940 
1941 	pf = vf->pf;
1942 	hw = &pf->hw;
1943 	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1944 
1945 	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1946 					msg, msglen, NULL);
1947 	if (aq_ret) {
1948 		dev_info(&pf->pdev->dev,
1949 			 "Unable to send the message to VF %d aq_err %d\n",
1950 			 vf->vf_id, pf->hw.aq.asq_last_status);
1951 		return -EIO;
1952 	}
1953 
1954 	return 0;
1955 }
1956 
1957 /**
1958  * i40e_vc_send_resp_to_vf
1959  * @vf: pointer to the VF info
1960  * @opcode: operation code
1961  * @retval: return value
1962  *
1963  * send resp msg to VF
1964  **/
1965 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1966 				   enum virtchnl_ops opcode,
1967 				   i40e_status retval)
1968 {
1969 	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1970 }
1971 
1972 /**
1973  * i40e_sync_vf_state
1974  * @vf: pointer to the VF info
1975  * @state: VF state
1976  *
1977  * Called from a VF message to synchronize the service with a potential
1978  * VF reset state
1979  **/
1980 static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
1981 {
1982 	int i;
1983 
1984 	/* Some messages can only be handled once the required VF state
1985 	 * bit is set. That bit may be cleared while a VF reset is in
1986 	 * progress, so wait for the reset to finish before handling
1987 	 * the request message.
1988 	 */
1989 	for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
1990 		if (test_bit(state, &vf->vf_states))
1991 			return true;
1992 		usleep_range(10000, 20000);
1993 	}
1994 
1995 	return test_bit(state, &vf->vf_states);
1996 }
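
/* i40e_sync_vf_state() is a bounded poll: retry a predicate a fixed number
 * of times with a sleep between attempts, then sample it one final time.
 * A generic userspace sketch of the same idiom (wait_for() and its
 * parameters are hypothetical; the block is excluded from the build):
 */
#if 0
#include <stdbool.h>
#include <unistd.h>

/* Poll cond(arg) up to max_tries times, sleeping between attempts. */
static bool wait_for(bool (*cond)(void *), void *arg,
		     int max_tries, useconds_t gap_us)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (cond(arg))
			return true;
		usleep(gap_us);
	}
	return cond(arg);	/* final sample after the last sleep */
}
#endif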
1997 
1998 /**
1999  * i40e_vc_get_version_msg
2000  * @vf: pointer to the VF info
2001  * @msg: pointer to the msg buffer
2002  *
2003  * called from the VF to request the API version used by the PF
2004  **/
2005 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
2006 {
2007 	struct virtchnl_version_info info = {
2008 		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2009 	};
2010 
2011 	vf->vf_ver = *(struct virtchnl_version_info *)msg;
2012 	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2013 	if (VF_IS_V10(&vf->vf_ver))
2014 		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2015 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2016 				      I40E_SUCCESS, (u8 *)&info,
2017 				      sizeof(struct virtchnl_version_info));
2018 }
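
/* The version handshake above replies with whatever the peer can parse: a
 * 1.0 VF gets 1.0 back, newer VFs get the PF's full version. A sketch of
 * that downgrade rule; the constants and negotiate() are illustrative, not
 * virtchnl definitions, and the block is excluded from the build.
 */
#if 0
struct api_version { unsigned int major, minor; };

#define API_MAJOR	1
#define API_MINOR	1
#define API_MINOR_OLD	0	/* pre-capabilities minor version */

static struct api_version negotiate(const struct api_version *peer)
{
	struct api_version out = { API_MAJOR, API_MINOR };

	/* A 1.0 peer cannot parse the newer reply format; downgrade. */
	if (peer->major == 1 && peer->minor == 0)
		out.minor = API_MINOR_OLD;
	return out;
}
#endif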
2019 
2020 /**
2021  * i40e_del_qch - delete all the additional VSIs created as a part of ADq
2022  * @vf: pointer to VF structure
2023  **/
2024 static void i40e_del_qch(struct i40e_vf *vf)
2025 {
2026 	struct i40e_pf *pf = vf->pf;
2027 	int i;
2028 
2029 	/* The first element in the array belongs to the primary VF VSI, which
2030 	 * we shouldn't delete. We should, however, delete the rest of the VSIs.
2031 	 */
2032 	for (i = 1; i < vf->num_tc; i++) {
2033 		if (vf->ch[i].vsi_idx) {
2034 			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
2035 			vf->ch[i].vsi_idx = 0;
2036 			vf->ch[i].vsi_id = 0;
2037 		}
2038 	}
2039 }
2040 
2041 /**
2042  * i40e_vc_get_vf_resources_msg
2043  * @vf: pointer to the VF info
2044  * @msg: pointer to the msg buffer
2045  *
2046  * called from the VF to request its resources
2047  **/
2048 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
2049 {
2050 	struct virtchnl_vf_resource *vfres = NULL;
2051 	struct i40e_pf *pf = vf->pf;
2052 	i40e_status aq_ret = 0;
2053 	struct i40e_vsi *vsi;
2054 	int num_vsis = 1;
2055 	size_t len = 0;
2056 	int ret;
2057 
2058 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
2059 		aq_ret = I40E_ERR_PARAM;
2060 		goto err;
2061 	}
2062 
2063 	len = struct_size(vfres, vsi_res, num_vsis);
2064 	vfres = kzalloc(len, GFP_KERNEL);
2065 	if (!vfres) {
2066 		aq_ret = I40E_ERR_NO_MEMORY;
2067 		len = 0;
2068 		goto err;
2069 	}
2070 	if (VF_IS_V11(&vf->vf_ver))
2071 		vf->driver_caps = *(u32 *)msg;
2072 	else
2073 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2074 				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
2075 				  VIRTCHNL_VF_OFFLOAD_VLAN;
2076 
2077 	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
2078 	vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2079 	vsi = pf->vsi[vf->lan_vsi_idx];
2080 	if (!vsi->info.pvid)
2081 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2082 
2083 	if (i40e_vf_client_capable(pf, vf->vf_id) &&
2084 	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
2085 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
2086 		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
2087 	} else {
2088 		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
2089 	}
2090 
2091 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2092 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2093 	} else {
2094 		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
2095 		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
2096 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2097 		else
2098 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2099 	}
2100 
2101 	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
2102 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2103 			vfres->vf_cap_flags |=
2104 				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2105 	}
2106 
2107 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2108 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2109 
2110 	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
2111 	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2112 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2113 
2114 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
2115 		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
2116 			dev_err(&pf->pdev->dev,
2117 				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
2118 				 vf->vf_id);
2119 			aq_ret = I40E_ERR_PARAM;
2120 			goto err;
2121 		}
2122 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2123 	}
2124 
2125 	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
2126 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2127 			vfres->vf_cap_flags |=
2128 					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2129 	}
2130 
2131 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2132 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2133 
2134 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
2135 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
2136 
2137 	vfres->num_vsis = num_vsis;
2138 	vfres->num_queue_pairs = vf->num_queue_pairs;
2139 	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
2140 	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
2141 	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
2142 
2143 	if (vf->lan_vsi_idx) {
2144 		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
2145 		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2146 		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
2147 		/* VFs only use TC 0 */
2148 		vfres->vsi_res[0].qset_handle
2149 					  = le16_to_cpu(vsi->info.qs_handle[0]);
2150 		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
2151 			i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
2152 			eth_zero_addr(vf->default_lan_addr.addr);
2153 		}
2154 		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2155 				vf->default_lan_addr.addr);
2156 	}
2157 	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
2158 
2159 err:
2160 	/* send the response back to the VF */
2161 	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
2162 				     aq_ret, (u8 *)vfres, len);
2163 
2164 	kfree(vfres);
2165 	return ret;
2166 }
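
/* The reply above is a header ending in a flexible array member, sized with
 * struct_size() so the header plus "n" trailing elements are accounted for
 * with overflow checking. Illustrative shape only: msg_hdr/msg_elem are
 * made-up types, not driver structures, and the block is excluded from the
 * build.
 */
#if 0
struct msg_elem { u32 id; };

struct msg_hdr {
	u16 count;
	struct msg_elem elems[];	/* flexible array member */
};

static struct msg_hdr *alloc_msg(u16 count)
{
	/* struct_size(p, member, n) is sizeof(*p) + n * sizeof(p->member[0]),
	 * saturating on overflow instead of silently wrapping.
	 */
	struct msg_hdr *h = kzalloc(struct_size(h, elems, count), GFP_KERNEL);

	if (h)
		h->count = count;
	return h;
}
#endif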
2167 
2168 /**
2169  * i40e_vc_config_promiscuous_mode_msg
2170  * @vf: pointer to the VF info
2171  * @msg: pointer to the msg buffer
2172  *
2173  * called from the VF to configure the promiscuous mode of
2174  * VF vsis
2175  **/
2176 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
2177 {
2178 	struct virtchnl_promisc_info *info =
2179 	    (struct virtchnl_promisc_info *)msg;
2180 	struct i40e_pf *pf = vf->pf;
2181 	i40e_status aq_ret = 0;
2182 	bool allmulti = false;
2183 	bool alluni = false;
2184 
2185 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2186 		aq_ret = I40E_ERR_PARAM;
2187 		goto err_out;
2188 	}
2189 	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2190 		dev_err(&pf->pdev->dev,
2191 			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
2192 			vf->vf_id);
2193 
2194 		/* Lie to the VF on purpose, because this is an error we can
2195 		 * ignore. Unprivileged VF is not a virtual channel error.
2196 		 */
2197 		aq_ret = 0;
2198 		goto err_out;
2199 	}
2200 
2201 	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2202 		aq_ret = I40E_ERR_PARAM;
2203 		goto err_out;
2204 	}
2205 
2206 	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2207 		aq_ret = I40E_ERR_PARAM;
2208 		goto err_out;
2209 	}
2210 
2211 	/* Multicast promiscuous handling */
2212 	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2213 		allmulti = true;
2214 
2215 	if (info->flags & FLAG_VF_UNICAST_PROMISC)
2216 		alluni = true;
2217 	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2218 						 alluni);
2219 	if (aq_ret)
2220 		goto err_out;
2221 
2222 	if (allmulti) {
2223 		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2224 				      &vf->vf_states))
2225 			dev_info(&pf->pdev->dev,
2226 				 "VF %d successfully set multicast promiscuous mode\n",
2227 				 vf->vf_id);
2228 	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2229 				      &vf->vf_states))
2230 		dev_info(&pf->pdev->dev,
2231 			 "VF %d successfully unset multicast promiscuous mode\n",
2232 			 vf->vf_id);
2233 
2234 	if (alluni) {
2235 		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2236 				      &vf->vf_states))
2237 			dev_info(&pf->pdev->dev,
2238 				 "VF %d successfully set unicast promiscuous mode\n",
2239 				 vf->vf_id);
2240 	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2241 				      &vf->vf_states))
2242 		dev_info(&pf->pdev->dev,
2243 			 "VF %d successfully unset unicast promiscuous mode\n",
2244 			 vf->vf_id);
2245 
2246 err_out:
2247 	/* send the response to the VF */
2248 	return i40e_vc_send_resp_to_vf(vf,
2249 				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2250 				       aq_ret);
2251 }
2252 
2253 /**
2254  * i40e_vc_config_queues_msg
2255  * @vf: pointer to the VF info
2256  * @msg: pointer to the msg buffer
2257  *
2258  * called from the VF to configure the rx/tx
2259  * queues
2260  **/
2261 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2262 {
2263 	struct virtchnl_vsi_queue_config_info *qci =
2264 	    (struct virtchnl_vsi_queue_config_info *)msg;
2265 	struct virtchnl_queue_pair_info *qpi;
2266 	u16 vsi_id, vsi_queue_id = 0;
2267 	struct i40e_pf *pf = vf->pf;
2268 	i40e_status aq_ret = 0;
2269 	int i, j = 0, idx = 0;
2270 	struct i40e_vsi *vsi;
2271 	u16 num_qps_all = 0;
2272 
2273 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2274 		aq_ret = I40E_ERR_PARAM;
2275 		goto error_param;
2276 	}
2277 
2278 	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2279 		aq_ret = I40E_ERR_PARAM;
2280 		goto error_param;
2281 	}
2282 
2283 	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2284 		aq_ret = I40E_ERR_PARAM;
2285 		goto error_param;
2286 	}
2287 
2288 	if (vf->adq_enabled) {
2289 		for (i = 0; i < vf->num_tc; i++)
2290 			num_qps_all += vf->ch[i].num_qps;
2291 		if (num_qps_all != qci->num_queue_pairs) {
2292 			aq_ret = I40E_ERR_PARAM;
2293 			goto error_param;
2294 		}
2295 	}
2296 
2297 	vsi_id = qci->vsi_id;
2298 
2299 	for (i = 0; i < qci->num_queue_pairs; i++) {
2300 		qpi = &qci->qpair[i];
2301 
2302 		if (!vf->adq_enabled) {
2303 			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2304 						      qpi->txq.queue_id)) {
2305 				aq_ret = I40E_ERR_PARAM;
2306 				goto error_param;
2307 			}
2308 
2309 			vsi_queue_id = qpi->txq.queue_id;
2310 
2311 			if (qpi->txq.vsi_id != qci->vsi_id ||
2312 			    qpi->rxq.vsi_id != qci->vsi_id ||
2313 			    qpi->rxq.queue_id != vsi_queue_id) {
2314 				aq_ret = I40E_ERR_PARAM;
2315 				goto error_param;
2316 			}
2317 		}
2318 
2319 		if (vf->adq_enabled) {
2320 			if (idx >= ARRAY_SIZE(vf->ch)) {
2321 				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2322 				goto error_param;
2323 			}
2324 			vsi_id = vf->ch[idx].vsi_id;
2325 		}
2326 
2327 		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2328 					     &qpi->rxq) ||
2329 		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2330 					     &qpi->txq)) {
2331 			aq_ret = I40E_ERR_PARAM;
2332 			goto error_param;
2333 		}
2334 
2335 		/* For ADq there can be up to 4 VSIs with a max of 4 queues each.
2336 		 * The VF does not know about these additional VSIs; all it
2337 		 * cares about is its own queues. The PF configures these queues
2338 		 * on the appropriate VSIs based on the TC mapping.
2339 		 */
2340 		if (vf->adq_enabled) {
2341 			if (idx >= ARRAY_SIZE(vf->ch)) {
2342 				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2343 				goto error_param;
2344 			}
2345 			if (j == (vf->ch[idx].num_qps - 1)) {
2346 				idx++;
2347 				j = 0; /* resetting the queue count */
2348 				vsi_queue_id = 0;
2349 			} else {
2350 				j++;
2351 				vsi_queue_id++;
2352 			}
2353 		}
2354 	}
2355 	/* set vsi num_queue_pairs in use to num configured by VF */
2356 	if (!vf->adq_enabled) {
2357 		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2358 			qci->num_queue_pairs;
2359 	} else {
2360 		for (i = 0; i < vf->num_tc; i++) {
2361 			vsi = pf->vsi[vf->ch[i].vsi_idx];
2362 			vsi->num_queue_pairs = vf->ch[i].num_qps;
2363 
2364 			if (i40e_update_adq_vsi_queues(vsi, i)) {
2365 				aq_ret = I40E_ERR_CONFIG;
2366 				goto error_param;
2367 			}
2368 		}
2369 	}
2370 
2371 error_param:
2372 	/* send the response to the VF */
2373 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2374 				       aq_ret);
2375 }
2376 
2377 /**
2378  * i40e_validate_queue_map - check queue map is valid
2379  * @vf: the VF structure pointer
2380  * @vsi_id: vsi id
2381  * @queuemap: Tx or Rx queue map
2382  *
2383  * check if Tx or Rx queue map is valid
2384  **/
2385 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2386 				   unsigned long queuemap)
2387 {
2388 	u16 vsi_queue_id, queue_id;
2389 
2390 	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2391 		if (vf->adq_enabled) {
2392 			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2393 			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2394 		} else {
2395 			queue_id = vsi_queue_id;
2396 		}
2397 
2398 		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2399 			return -EINVAL;
2400 	}
2401 
2402 	return 0;
2403 }
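
/* With ADq the VF addresses one flat queue index space while the PF spreads
 * those queues across per-TC VSIs, so a flat index is split into a channel
 * (/ queues-per-VSI) and an offset within it (%). Hedged sketch with a
 * made-up per-channel size; the block is excluded from the build.
 */
#if 0
#define QUEUES_PER_CHANNEL 4	/* assumed per-VSI queue count */

struct qloc { unsigned int channel, queue; };

static struct qloc flat_to_channel(unsigned int flat_qid)
{
	struct qloc loc = {
		.channel = flat_qid / QUEUES_PER_CHANNEL,
		.queue   = flat_qid % QUEUES_PER_CHANNEL,
	};

	return loc;	/* e.g. flat 6 -> channel 1, queue 2 */
}
#endif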
2404 
2405 /**
2406  * i40e_vc_config_irq_map_msg
2407  * @vf: pointer to the VF info
2408  * @msg: pointer to the msg buffer
2409  *
2410  * called from the VF to configure the irq to
2411  * queue map
2412  **/
2413 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2414 {
2415 	struct virtchnl_irq_map_info *irqmap_info =
2416 	    (struct virtchnl_irq_map_info *)msg;
2417 	struct virtchnl_vector_map *map;
2418 	u16 vsi_id;
2419 	i40e_status aq_ret = 0;
2420 	int i;
2421 
2422 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2423 		aq_ret = I40E_ERR_PARAM;
2424 		goto error_param;
2425 	}
2426 
2427 	if (irqmap_info->num_vectors >
2428 	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
2429 		aq_ret = I40E_ERR_PARAM;
2430 		goto error_param;
2431 	}
2432 
2433 	for (i = 0; i < irqmap_info->num_vectors; i++) {
2434 		map = &irqmap_info->vecmap[i];
2435 		/* validate msg params */
2436 		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2437 		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2438 			aq_ret = I40E_ERR_PARAM;
2439 			goto error_param;
2440 		}
2441 		vsi_id = map->vsi_id;
2442 
2443 		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2444 			aq_ret = I40E_ERR_PARAM;
2445 			goto error_param;
2446 		}
2447 
2448 		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2449 			aq_ret = I40E_ERR_PARAM;
2450 			goto error_param;
2451 		}
2452 
2453 		i40e_config_irq_link_list(vf, vsi_id, map);
2454 	}
2455 error_param:
2456 	/* send the response to the VF */
2457 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2458 				       aq_ret);
2459 }
2460 
2461 /**
2462  * i40e_ctrl_vf_tx_rings
2463  * @vsi: the SRIOV VSI being configured
2464  * @q_map: bit map of the queues to be enabled
2465  * @enable: start or stop the queue
2466  **/
2467 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2468 				 bool enable)
2469 {
2470 	struct i40e_pf *pf = vsi->back;
2471 	int ret = 0;
2472 	u16 q_id;
2473 
2474 	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2475 		ret = i40e_control_wait_tx_q(vsi->seid, pf,
2476 					     vsi->base_queue + q_id,
2477 					     false /*is xdp*/, enable);
2478 		if (ret)
2479 			break;
2480 	}
2481 	return ret;
2482 }
2483 
2484 /**
2485  * i40e_ctrl_vf_rx_rings
2486  * @vsi: the SRIOV VSI being configured
2487  * @q_map: bit map of the queues to be enabled
2488  * @enable: start or stop the queue
2489  **/
2490 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2491 				 bool enable)
2492 {
2493 	struct i40e_pf *pf = vsi->back;
2494 	int ret = 0;
2495 	u16 q_id;
2496 
2497 	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2498 		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2499 					     enable);
2500 		if (ret)
2501 			break;
2502 	}
2503 	return ret;
2504 }
2505 
2506 /**
2507  * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2508  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2509  *
2510  * Returns true if validation was successful, else false.
2511  **/
2512 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2513 {
2514 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
2515 	    vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2516 	    vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2517 		return false;
2518 
2519 	return true;
2520 }
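
/* The bitmap check above enforces two rules: at least one queue must be
 * selected, and no bit at or above the per-VF queue limit may be set. The
 * same test in isolation (MAX_QUEUES is an assumed limit; the block is
 * excluded from the build):
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define MAX_QUEUES 16

static bool qmaps_valid(uint32_t rx_map, uint32_t tx_map)
{
	if (!rx_map && !tx_map)		/* nothing selected at all */
		return false;
	if (rx_map >> MAX_QUEUES || tx_map >> MAX_QUEUES)
		return false;		/* bit beyond the queue limit */
	return true;
}
#endif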
2521 
2522 /**
2523  * i40e_vc_enable_queues_msg
2524  * @vf: pointer to the VF info
2525  * @msg: pointer to the msg buffer
2526  *
2527  * called from the VF to enable all or specific queue(s)
2528  **/
2529 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2530 {
2531 	struct virtchnl_queue_select *vqs =
2532 	    (struct virtchnl_queue_select *)msg;
2533 	struct i40e_pf *pf = vf->pf;
2534 	i40e_status aq_ret = 0;
2535 	int i;
2536 
2537 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2538 		aq_ret = I40E_ERR_PARAM;
2539 		goto error_param;
2540 	}
2541 
2542 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2543 		aq_ret = I40E_ERR_PARAM;
2544 		goto error_param;
2545 	}
2546 
2547 	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2548 		aq_ret = I40E_ERR_PARAM;
2549 		goto error_param;
2550 	}
2551 
2552 	/* Use the queue bit map sent by the VF */
2553 	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2554 				  true)) {
2555 		aq_ret = I40E_ERR_TIMEOUT;
2556 		goto error_param;
2557 	}
2558 	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2559 				  true)) {
2560 		aq_ret = I40E_ERR_TIMEOUT;
2561 		goto error_param;
2562 	}
2563 
2564 	/* need to start the rings for the additional ADq VSIs as well */
2565 	if (vf->adq_enabled) {
2566 		/* zero belongs to LAN VSI */
2567 		for (i = 1; i < vf->num_tc; i++) {
2568 			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2569 				aq_ret = I40E_ERR_TIMEOUT;
2570 		}
2571 	}
2572 
2573 error_param:
2574 	/* send the response to the VF */
2575 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2576 				       aq_ret);
2577 }
2578 
2579 /**
2580  * i40e_vc_disable_queues_msg
2581  * @vf: pointer to the VF info
2582  * @msg: pointer to the msg buffer
2583  *
2584  * called from the VF to disable all or specific
2585  * queue(s)
2586  **/
2587 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2588 {
2589 	struct virtchnl_queue_select *vqs =
2590 	    (struct virtchnl_queue_select *)msg;
2591 	struct i40e_pf *pf = vf->pf;
2592 	i40e_status aq_ret = 0;
2593 
2594 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2595 		aq_ret = I40E_ERR_PARAM;
2596 		goto error_param;
2597 	}
2598 
2599 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2600 		aq_ret = I40E_ERR_PARAM;
2601 		goto error_param;
2602 	}
2603 
2604 	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2605 		aq_ret = I40E_ERR_PARAM;
2606 		goto error_param;
2607 	}
2608 
2609 	/* Use the queue bit map sent by the VF */
2610 	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2611 				  false)) {
2612 		aq_ret = I40E_ERR_TIMEOUT;
2613 		goto error_param;
2614 	}
2615 	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2616 				  false)) {
2617 		aq_ret = I40E_ERR_TIMEOUT;
2618 		goto error_param;
2619 	}
2620 error_param:
2621 	/* send the response to the VF */
2622 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2623 				       aq_ret);
2624 }
2625 
2626 /**
2627  * i40e_check_enough_queue - find a large enough block of queues
2628  * @vf: pointer to the VF info
2629  * @needed: the number of queues needed
2630  *
2631  * Returns the base index of a usable queue block, or negative on error
2632  **/
2633 static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
2634 {
2635 	unsigned int  i, cur_queues, more, pool_size;
2636 	struct i40e_lump_tracking *pile;
2637 	struct i40e_pf *pf = vf->pf;
2638 	struct i40e_vsi *vsi;
2639 
2640 	vsi = pf->vsi[vf->lan_vsi_idx];
2641 	cur_queues = vsi->alloc_queue_pairs;
2642 
2643 	/* if the currently allocated queues already satisfy the need */
2644 	if (cur_queues >= needed)
2645 		return vsi->base_queue;
2646 
2647 	pile = pf->qp_pile;
2648 	if (cur_queues > 0) {
2649 		/* if some queues are already allocated, just check
2650 		 * whether enough free entries follow the allocated
2651 		 * block to extend it in place.
2652 		 */
2653 		more = needed - cur_queues;
2654 		for (i = vsi->base_queue + cur_queues;
2655 			i < pile->num_entries; i++) {
2656 			if (pile->list[i] & I40E_PILE_VALID_BIT)
2657 				break;
2658 
2659 			if (more-- == 1)
2660 				/* there is enough */
2661 				return vsi->base_queue;
2662 		}
2663 	}
2664 
2665 	pool_size = 0;
2666 	for (i = 0; i < pile->num_entries; i++) {
2667 		if (pile->list[i] & I40E_PILE_VALID_BIT) {
2668 			pool_size = 0;
2669 			continue;
2670 		}
2671 		if (needed <= ++pool_size)
2672 			/* there is enough */
2673 			return i;
2674 	}
2675 
2676 	return -ENOMEM;
2677 }
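
/* The scan above walks a flat tracker in which a set VALID bit marks an
 * entry as taken; any taken entry restarts the free-run count. A standalone
 * sketch of that search (TAKEN and find_free_run() are illustrative names;
 * the block is excluded from the build):
 */
#if 0
#include <stdint.h>

#define TAKEN 0x8000	/* stand-in for I40E_PILE_VALID_BIT */

/* Return the base index of the first run of at least "needed" free
 * entries, or -1 if no such run exists.
 */
static int find_free_run(const uint16_t *list, int n, int needed)
{
	int i, run = 0;

	for (i = 0; i < n; i++) {
		if (list[i] & TAKEN) {
			run = 0;	/* run broken, start over */
			continue;
		}
		if (++run >= needed)
			return i - needed + 1;
	}
	return -1;
}
#endif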
2678 
2679 /**
2680  * i40e_vc_request_queues_msg
2681  * @vf: pointer to the VF info
2682  * @msg: pointer to the msg buffer
2683  *
2684  * VFs get a default number of queues but can use this message to request a
2685  * different number.  If the request is successful, the PF will reset the VF
2686  * and return 0.  If unsuccessful, the PF will send a message informing the
2687  * VF of the number of available queues, and return the result of sending it.
2688  **/
2689 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2690 {
2691 	struct virtchnl_vf_res_request *vfres =
2692 		(struct virtchnl_vf_res_request *)msg;
2693 	u16 req_pairs = vfres->num_queue_pairs;
2694 	u8 cur_pairs = vf->num_queue_pairs;
2695 	struct i40e_pf *pf = vf->pf;
2696 
2697 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
2698 		return -EINVAL;
2699 
2700 	if (req_pairs > I40E_MAX_VF_QUEUES) {
2701 		dev_err(&pf->pdev->dev,
2702 			"VF %d tried to request more than %d queues.\n",
2703 			vf->vf_id,
2704 			I40E_MAX_VF_QUEUES);
2705 		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2706 	} else if (req_pairs - cur_pairs > pf->queues_left) {
2707 		dev_warn(&pf->pdev->dev,
2708 			 "VF %d requested %d more queues, but only %d left.\n",
2709 			 vf->vf_id,
2710 			 req_pairs - cur_pairs,
2711 			 pf->queues_left);
2712 		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2713 	} else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
2714 		dev_warn(&pf->pdev->dev,
2715 			 "VF %d requested %d more queues, but there is not enough for it.\n",
2716 			 vf->vf_id,
2717 			 req_pairs - cur_pairs);
2718 		vfres->num_queue_pairs = cur_pairs;
2719 	} else {
2720 		/* successful request */
2721 		vf->num_req_queues = req_pairs;
2722 		i40e_vc_reset_vf(vf, true);
2723 		return 0;
2724 	}
2725 
2726 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2727 				      (u8 *)vfres, sizeof(*vfres));
2728 }
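
/* The handler above clamps an oversized request to the hardware maximum,
 * then to what the PF pool has left, and only then grants it (the driver
 * additionally verifies that a contiguous block exists). The clamping
 * branches condensed, with assumed limits; the block is excluded from the
 * build.
 */
#if 0
#define HW_MAX_QUEUES 16

/* Return the queue count that would actually be offered back. */
static int resolve_request(int requested, int current, int pool_left)
{
	if (requested > HW_MAX_QUEUES)
		return HW_MAX_QUEUES;		/* hardware ceiling  */
	if (requested - current > pool_left)
		return current + pool_left;	/* PF pool exhausted */
	return requested;			/* grant as asked    */
}
#endif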
2729 
2730 /**
2731  * i40e_vc_get_stats_msg
2732  * @vf: pointer to the VF info
2733  * @msg: pointer to the msg buffer
2734  *
2735  * called from the VF to get vsi stats
2736  **/
2737 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2738 {
2739 	struct virtchnl_queue_select *vqs =
2740 	    (struct virtchnl_queue_select *)msg;
2741 	struct i40e_pf *pf = vf->pf;
2742 	struct i40e_eth_stats stats;
2743 	i40e_status aq_ret = 0;
2744 	struct i40e_vsi *vsi;
2745 
2746 	memset(&stats, 0, sizeof(struct i40e_eth_stats));
2747 
2748 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2749 		aq_ret = I40E_ERR_PARAM;
2750 		goto error_param;
2751 	}
2752 
2753 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2754 		aq_ret = I40E_ERR_PARAM;
2755 		goto error_param;
2756 	}
2757 
2758 	vsi = pf->vsi[vf->lan_vsi_idx];
2759 	if (!vsi) {
2760 		aq_ret = I40E_ERR_PARAM;
2761 		goto error_param;
2762 	}
2763 	i40e_update_eth_stats(vsi);
2764 	stats = vsi->eth_stats;
2765 
2766 error_param:
2767 	/* send the response back to the VF */
2768 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2769 				      (u8 *)&stats, sizeof(stats));
2770 }
2771 
2772 #define I40E_MAX_MACVLAN_PER_HW 3072
2773 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW /	\
2774 	(num_ports))
2775 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
2776  * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast.
2777  */
2778 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2779 #define I40E_VC_MAX_VLAN_PER_VF 16
2780 
2781 #define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports)		\
2782 ({	typeof(vf_num) vf_num_ = (vf_num);				\
2783 	typeof(num_ports) num_ports_ = (num_ports);			\
2784 	((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ *		\
2785 	I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) +			\
2786 	I40E_VC_MAX_MAC_ADDR_PER_VF; })
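
/* Worked example of the budget above: 3072 filters per device split across
 * 2 ports gives each PF 1536. With 8 VFs, the untrusted reservation is
 * 8 * 18 = 144, leaving 1392, i.e. 174 extra per VF, so a trusted VF may own
 * 174 + 18 = 192 filters. The same arithmetic as a compile-time check (all
 * input values assumed; the block is excluded from the build):
 */
#if 0
#define PER_HW		3072
#define PORTS		2
#define VFS		8
#define PER_UNTRUSTED	18	/* 16 multicast + 1 MAC + 1 broadcast */

#define PER_PF		(PER_HW / PORTS)			/* 1536 */
#define PER_TRUSTED	(((PER_PF - VFS * PER_UNTRUSTED) / VFS) + \
			 PER_UNTRUSTED)				/* 192  */

_Static_assert(PER_TRUSTED == 192, "trusted VF filter budget");
#endif
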
2787 /**
2788  * i40e_check_vf_permission
2789  * @vf: pointer to the VF info
2790  * @al: MAC address list from virtchnl
2791  *
2792  * Check that the given list of MAC addresses is allowed. Will return -EPERM
2793  * if any address in the list is not valid. Checks the following conditions:
2794  *
2795  * 1) broadcast and zero addresses are never valid
2796  * 2) unicast addresses are not allowed if the VMM has administratively set
2797  *    the VF MAC address, unless the VF is marked as privileged.
2798  * 3) There is enough space to add all the addresses.
2799  *
2800  * Note that to guarantee consistency, it is expected this function be called
2801  * while holding the mac_filter_hash_lock, as otherwise the current number of
2802  * addresses might not be accurate.
2803  **/
2804 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2805 					   struct virtchnl_ether_addr_list *al)
2806 {
2807 	struct i40e_pf *pf = vf->pf;
2808 	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2809 	struct i40e_hw *hw = &pf->hw;
2810 	int mac2add_cnt = 0;
2811 	int i;
2812 
2813 	for (i = 0; i < al->num_elements; i++) {
2814 		struct i40e_mac_filter *f;
2815 		u8 *addr = al->list[i].addr;
2816 
2817 		if (is_broadcast_ether_addr(addr) ||
2818 		    is_zero_ether_addr(addr)) {
2819 			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2820 				addr);
2821 			return I40E_ERR_INVALID_MAC_ADDR;
2822 		}
2823 
2824 		/* If the host VMM administrator has set the VF MAC address
2825 		 * administratively via the ndo_set_vf_mac command then deny
2826 		 * permission to the VF to add or delete unicast MAC addresses.
2827 		 * Unless the VF is privileged and then it can do whatever.
2828 		 * The VF may request to set the MAC address filter already
2829 		 * assigned to it so do not return an error in that case.
2830 		 */
2831 		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2832 		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2833 		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2834 			dev_err(&pf->pdev->dev,
2835 				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2836 			return -EPERM;
2837 		}
2838 
2839 		/* count filters that will really be added */
2840 		f = i40e_find_mac(vsi, addr);
2841 		if (!f)
2842 			++mac2add_cnt;
2843 	}
2844 
2845 	/* If this VF is not privileged, then we can't add more than a limited
2846 	 * number of addresses. Check to make sure that the additions do not
2847 	 * push us over the limit.
2848 	 */
2849 	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2850 		if ((i40e_count_filters(vsi) + mac2add_cnt) >
2851 		    I40E_VC_MAX_MAC_ADDR_PER_VF) {
2852 			dev_err(&pf->pdev->dev,
2853 				"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2854 			return -EPERM;
2855 		}
2856 	/* If this VF is trusted, it can use more resources than untrusted.
2857 	 * However to ensure that every trusted VF has appropriate number of
2858 	 * resources, divide whole pool of resources per port and then across
2859 	 * all VFs.
2860 	 */
2861 	} else {
2862 		if ((i40e_count_filters(vsi) + mac2add_cnt) >
2863 		    I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
2864 						       hw->num_ports)) {
2865 			dev_err(&pf->pdev->dev,
2866 				"Cannot add more MAC addresses, trusted VF exhausted its resources\n");
2867 			return -EPERM;
2868 		}
2869 	}
2870 	return 0;
2871 }
2872 
2873 /**
2874  * i40e_vc_add_mac_addr_msg
2875  * @vf: pointer to the VF info
2876  * @msg: pointer to the msg buffer
2877  *
2878  * add guest mac address filter
2879  **/
2880 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2881 {
2882 	struct virtchnl_ether_addr_list *al =
2883 	    (struct virtchnl_ether_addr_list *)msg;
2884 	struct i40e_pf *pf = vf->pf;
2885 	struct i40e_vsi *vsi = NULL;
2886 	i40e_status ret = 0;
2887 	int i;
2888 
2889 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
2890 	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2891 		ret = I40E_ERR_PARAM;
2892 		goto error_param;
2893 	}
2894 
2895 	vsi = pf->vsi[vf->lan_vsi_idx];
2896 
2897 	/* Lock once, because every function inside the for loop accesses the
2898 	 * VSI's MAC filter list, which must be protected by the same lock.
2899 	 */
2900 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2901 
2902 	ret = i40e_check_vf_permission(vf, al);
2903 	if (ret) {
2904 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
2905 		goto error_param;
2906 	}
2907 
2908 	/* add new addresses to the list */
2909 	for (i = 0; i < al->num_elements; i++) {
2910 		struct i40e_mac_filter *f;
2911 
2912 		f = i40e_find_mac(vsi, al->list[i].addr);
2913 		if (!f) {
2914 			f = i40e_add_mac_filter(vsi, al->list[i].addr);
2915 
2916 			if (!f) {
2917 				dev_err(&pf->pdev->dev,
2918 					"Unable to add MAC filter %pM for VF %d\n",
2919 					al->list[i].addr, vf->vf_id);
2920 				ret = I40E_ERR_PARAM;
2921 				spin_unlock_bh(&vsi->mac_filter_hash_lock);
2922 				goto error_param;
2923 			}
2924 			if (is_valid_ether_addr(al->list[i].addr) &&
2925 			    is_zero_ether_addr(vf->default_lan_addr.addr))
2926 				ether_addr_copy(vf->default_lan_addr.addr,
2927 						al->list[i].addr);
2928 		}
2929 	}
2930 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2931 
2932 	/* program the updated filter list */
2933 	ret = i40e_sync_vsi_filters(vsi);
2934 	if (ret)
2935 		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2936 			vf->vf_id, ret);
2937 
2938 error_param:
2939 	/* send the response to the VF */
2940 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2941 				      ret, NULL, 0);
2942 }
2943 
2944 /**
2945  * i40e_vc_del_mac_addr_msg
2946  * @vf: pointer to the VF info
2947  * @msg: pointer to the msg buffer
2948  *
2949  * remove guest mac address filter
2950  **/
2951 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2952 {
2953 	struct virtchnl_ether_addr_list *al =
2954 	    (struct virtchnl_ether_addr_list *)msg;
2955 	bool was_unimac_deleted = false;
2956 	struct i40e_pf *pf = vf->pf;
2957 	struct i40e_vsi *vsi = NULL;
2958 	i40e_status ret = 0;
2959 	int i;
2960 
2961 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
2962 	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2963 		ret = I40E_ERR_PARAM;
2964 		goto error_param;
2965 	}
2966 
2967 	for (i = 0; i < al->num_elements; i++) {
2968 		if (is_broadcast_ether_addr(al->list[i].addr) ||
2969 		    is_zero_ether_addr(al->list[i].addr)) {
2970 			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2971 				al->list[i].addr, vf->vf_id);
2972 			ret = I40E_ERR_INVALID_MAC_ADDR;
2973 			goto error_param;
2974 		}
2975 		if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
2976 			was_unimac_deleted = true;
2977 	}
2978 	vsi = pf->vsi[vf->lan_vsi_idx];
2979 
2980 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2981 	/* delete addresses from the list */
2982 	for (i = 0; i < al->num_elements; i++)
2983 		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2984 			ret = I40E_ERR_INVALID_MAC_ADDR;
2985 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
2986 			goto error_param;
2987 		}
2988 
2989 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2990 
2991 	/* program the updated filter list */
2992 	ret = i40e_sync_vsi_filters(vsi);
2993 	if (ret)
2994 		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2995 			vf->vf_id, ret);
2996 
2997 	if (vf->trusted && was_unimac_deleted) {
2998 		struct i40e_mac_filter *f;
2999 		struct hlist_node *h;
3000 		u8 *macaddr = NULL;
3001 		int bkt;
3002 
3003 		/* set last unicast mac address as default */
3004 		spin_lock_bh(&vsi->mac_filter_hash_lock);
3005 		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3006 			if (is_valid_ether_addr(f->macaddr))
3007 				macaddr = f->macaddr;
3008 		}
3009 		if (macaddr)
3010 			ether_addr_copy(vf->default_lan_addr.addr, macaddr);
3011 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
3012 	}
3013 error_param:
3014 	/* send the response to the VF */
3015 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
3016 }
3017 
3018 /**
3019  * i40e_vc_add_vlan_msg
3020  * @vf: pointer to the VF info
3021  * @msg: pointer to the msg buffer
3022  *
3023  * program guest vlan id
3024  **/
3025 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
3026 {
3027 	struct virtchnl_vlan_filter_list *vfl =
3028 	    (struct virtchnl_vlan_filter_list *)msg;
3029 	struct i40e_pf *pf = vf->pf;
3030 	struct i40e_vsi *vsi = NULL;
3031 	i40e_status aq_ret = 0;
3032 	int i;
3033 
3034 	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
3035 	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3036 		dev_err(&pf->pdev->dev,
3037 			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
3038 		goto error_param;
3039 	}
3040 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3041 	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3042 		aq_ret = I40E_ERR_PARAM;
3043 		goto error_param;
3044 	}
3045 
3046 	for (i = 0; i < vfl->num_elements; i++) {
3047 		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3048 			aq_ret = I40E_ERR_PARAM;
3049 			dev_err(&pf->pdev->dev,
3050 				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
3051 			goto error_param;
3052 		}
3053 	}
3054 	vsi = pf->vsi[vf->lan_vsi_idx];
3055 	if (vsi->info.pvid) {
3056 		aq_ret = I40E_ERR_PARAM;
3057 		goto error_param;
3058 	}
3059 
3060 	i40e_vlan_stripping_enable(vsi);
3061 	for (i = 0; i < vfl->num_elements; i++) {
3062 		/* add new VLAN filter */
3063 		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
3064 		if (!ret)
3065 			vf->num_vlan++;
3066 
3067 		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3068 			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3069 							   true,
3070 							   vfl->vlan_id[i],
3071 							   NULL);
3072 		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3073 			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3074 							   true,
3075 							   vfl->vlan_id[i],
3076 							   NULL);
3077 
3078 		if (ret)
3079 			dev_err(&pf->pdev->dev,
3080 				"Unable to add VLAN filter %d for VF %d, error %d\n",
3081 				vfl->vlan_id[i], vf->vf_id, ret);
3082 	}
3083 
3084 error_param:
3085 	/* send the response to the VF */
3086 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
3087 }
3088 
3089 /**
3090  * i40e_vc_remove_vlan_msg
3091  * @vf: pointer to the VF info
3092  * @msg: pointer to the msg buffer
3093  *
3094  * remove programmed guest vlan id
3095  **/
3096 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
3097 {
3098 	struct virtchnl_vlan_filter_list *vfl =
3099 	    (struct virtchnl_vlan_filter_list *)msg;
3100 	struct i40e_pf *pf = vf->pf;
3101 	struct i40e_vsi *vsi = NULL;
3102 	i40e_status aq_ret = 0;
3103 	int i;
3104 
3105 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3106 	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3107 		aq_ret = I40E_ERR_PARAM;
3108 		goto error_param;
3109 	}
3110 
3111 	for (i = 0; i < vfl->num_elements; i++) {
3112 		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3113 			aq_ret = I40E_ERR_PARAM;
3114 			goto error_param;
3115 		}
3116 	}
3117 
3118 	vsi = pf->vsi[vf->lan_vsi_idx];
3119 	if (vsi->info.pvid) {
3120 		if (vfl->num_elements > 1 || vfl->vlan_id[0])
3121 			aq_ret = I40E_ERR_PARAM;
3122 		goto error_param;
3123 	}
3124 
3125 	for (i = 0; i < vfl->num_elements; i++) {
3126 		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
3127 		vf->num_vlan--;
3128 
3129 		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3130 			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3131 							   false,
3132 							   vfl->vlan_id[i],
3133 							   NULL);
3134 		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3135 			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3136 							   false,
3137 							   vfl->vlan_id[i],
3138 							   NULL);
3139 	}
3140 
3141 error_param:
3142 	/* send the response to the VF */
3143 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
3144 }
3145 
3146 /**
3147  * i40e_vc_iwarp_msg
3148  * @vf: pointer to the VF info
3149  * @msg: pointer to the msg buffer
3150  * @msglen: msg length
3151  *
3152  * called from the VF for the iwarp msgs
3153  **/
3154 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
3155 {
3156 	struct i40e_pf *pf = vf->pf;
3157 	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
3158 	i40e_status aq_ret = 0;
3159 
3160 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3161 	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
3162 		aq_ret = I40E_ERR_PARAM;
3163 		goto error_param;
3164 	}
3165 
3166 	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
3167 				     msg, msglen);
3168 
3169 error_param:
3170 	/* send the response to the VF */
3171 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
3172 				       aq_ret);
3173 }
3174 
3175 /**
3176  * i40e_vc_iwarp_qvmap_msg
3177  * @vf: pointer to the VF info
3178  * @msg: pointer to the msg buffer
3179  * @config: config qvmap or release it
3180  *
3181  * called from the VF for the iwarp msgs
3182  **/
3183 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
3184 {
3185 	struct virtchnl_iwarp_qvlist_info *qvlist_info =
3186 				(struct virtchnl_iwarp_qvlist_info *)msg;
3187 	i40e_status aq_ret = 0;
3188 
3189 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3190 	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
3191 		aq_ret = I40E_ERR_PARAM;
3192 		goto error_param;
3193 	}
3194 
3195 	if (config) {
3196 		if (i40e_config_iwarp_qvlist(vf, qvlist_info))
3197 			aq_ret = I40E_ERR_PARAM;
3198 	} else {
3199 		i40e_release_iwarp_qvlist(vf);
3200 	}
3201 
3202 error_param:
3203 	/* send the response to the VF */
3204 	return i40e_vc_send_resp_to_vf(vf,
3205 			       config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
3206 			       VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
3207 			       aq_ret);
3208 }
3209 
3210 /**
3211  * i40e_vc_config_rss_key
3212  * @vf: pointer to the VF info
3213  * @msg: pointer to the msg buffer
3214  *
3215  * Configure the VF's RSS key
3216  **/
3217 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3218 {
3219 	struct virtchnl_rss_key *vrk =
3220 		(struct virtchnl_rss_key *)msg;
3221 	struct i40e_pf *pf = vf->pf;
3222 	struct i40e_vsi *vsi = NULL;
3223 	i40e_status aq_ret = 0;
3224 
3225 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3226 	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3227 	    vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
3228 		aq_ret = I40E_ERR_PARAM;
3229 		goto err;
3230 	}
3231 
3232 	vsi = pf->vsi[vf->lan_vsi_idx];
3233 	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3234 err:
3235 	/* send the response to the VF */
3236 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3237 				       aq_ret);
3238 }
3239 
3240 /**
3241  * i40e_vc_config_rss_lut
3242  * @vf: pointer to the VF info
3243  * @msg: pointer to the msg buffer
3244  *
3245  * Configure the VF's RSS LUT
3246  **/
3247 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3248 {
3249 	struct virtchnl_rss_lut *vrl =
3250 		(struct virtchnl_rss_lut *)msg;
3251 	struct i40e_pf *pf = vf->pf;
3252 	struct i40e_vsi *vsi = NULL;
3253 	i40e_status aq_ret = 0;
3254 	u16 i;
3255 
3256 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3257 	    !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3258 	    vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
3259 		aq_ret = I40E_ERR_PARAM;
3260 		goto err;
3261 	}
3262 
3263 	for (i = 0; i < vrl->lut_entries; i++)
3264 		if (vrl->lut[i] >= vf->num_queue_pairs) {
3265 			aq_ret = I40E_ERR_PARAM;
3266 			goto err;
3267 		}
3268 
3269 	vsi = pf->vsi[vf->lan_vsi_idx];
3270 	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3271 	/* send the response to the VF */
3272 err:
3273 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3274 				       aq_ret);
3275 }
3276 
3277 /**
3278  * i40e_vc_get_rss_hena
3279  * @vf: pointer to the VF info
3280  * @msg: pointer to the msg buffer
3281  *
3282  * Return the RSS HENA bits allowed by the hardware
3283  **/
3284 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3285 {
3286 	struct virtchnl_rss_hena *vrh = NULL;
3287 	struct i40e_pf *pf = vf->pf;
3288 	i40e_status aq_ret = 0;
3289 	int len = 0;
3290 
3291 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3292 		aq_ret = I40E_ERR_PARAM;
3293 		goto err;
3294 	}
3295 	len = sizeof(struct virtchnl_rss_hena);
3296 
3297 	vrh = kzalloc(len, GFP_KERNEL);
3298 	if (!vrh) {
3299 		aq_ret = I40E_ERR_NO_MEMORY;
3300 		len = 0;
3301 		goto err;
3302 	}
3303 	vrh->hena = i40e_pf_get_default_rss_hena(pf);
3304 err:
3305 	/* send the response back to the VF */
3306 	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3307 					aq_ret, (u8 *)vrh, len);
3308 	kfree(vrh);
3309 	return aq_ret;
3310 }
3311 
3312 /**
3313  * i40e_vc_set_rss_hena
3314  * @vf: pointer to the VF info
3315  * @msg: pointer to the msg buffer
3316  *
3317  * Set the RSS HENA bits for the VF
3318  **/
3319 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3320 {
3321 	struct virtchnl_rss_hena *vrh =
3322 		(struct virtchnl_rss_hena *)msg;
3323 	struct i40e_pf *pf = vf->pf;
3324 	struct i40e_hw *hw = &pf->hw;
3325 	i40e_status aq_ret = 0;
3326 
3327 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3328 		aq_ret = I40E_ERR_PARAM;
3329 		goto err;
3330 	}
3331 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3332 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3333 			  (u32)(vrh->hena >> 32));
3334 
3335 	/* send the response to the VF */
3336 err:
3337 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3338 }
3339 
3340 /**
3341  * i40e_vc_enable_vlan_stripping
3342  * @vf: pointer to the VF info
3343  * @msg: pointer to the msg buffer
3344  *
3345  * Enable vlan header stripping for the VF
3346  **/
3347 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3348 {
3349 	i40e_status aq_ret = 0;
3350 	struct i40e_vsi *vsi;
3351 
3352 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3353 		aq_ret = I40E_ERR_PARAM;
3354 		goto err;
3355 	}
3356 
3357 	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3358 	i40e_vlan_stripping_enable(vsi);
3359 
3360 	/* send the response to the VF */
3361 err:
3362 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3363 				       aq_ret);
3364 }
3365 
3366 /**
3367  * i40e_vc_disable_vlan_stripping
3368  * @vf: pointer to the VF info
3369  * @msg: pointer to the msg buffer
3370  *
3371  * Disable vlan header stripping for the VF
3372  **/
3373 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3374 {
3375 	i40e_status aq_ret = 0;
3376 	struct i40e_vsi *vsi;
3377 
3378 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3379 		aq_ret = I40E_ERR_PARAM;
3380 		goto err;
3381 	}
3382 
3383 	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3384 	i40e_vlan_stripping_disable(vsi);
3385 
3386 	/* send the response to the VF */
3387 err:
3388 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3389 				       aq_ret);
3390 }
3391 
3392 /**
3393  * i40e_validate_cloud_filter
3394  * @vf: pointer to VF structure
3395  * @tc_filter: pointer to filter requested
3396  *
3397  * This function validates cloud filter programmed as TC filter for ADq
3398  **/
3399 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3400 				      struct virtchnl_filter *tc_filter)
3401 {
3402 	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3403 	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3404 	struct i40e_pf *pf = vf->pf;
3405 	struct i40e_vsi *vsi = NULL;
3406 	struct i40e_mac_filter *f;
3407 	struct hlist_node *h;
3408 	bool found = false;
3409 	int bkt;
3410 
3411 	if (!tc_filter->action) {
3412 		dev_info(&pf->pdev->dev,
3413 			 "VF %d: Currently ADq doesn't support Drop Action\n",
3414 			 vf->vf_id);
3415 		goto err;
3416 	}
3417 
3418 	/* action_meta is the TC number to which the filter is applied */
3419 	if (!tc_filter->action_meta ||
3420 	    tc_filter->action_meta > I40E_MAX_VF_VSI) {
3421 		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3422 			 vf->vf_id, tc_filter->action_meta);
3423 		goto err;
3424 	}
3425 
3426 	/* Check filter if it's programmed for advanced mode or basic mode.
3427 	 * There are two ADq modes (for VF only),
3428 	 * 1. Basic mode: intended to allow as many filter options as possible
3429 	 *		  to be added to a VF in Non-trusted mode. Main goal is
3430 	 *		  to add filters to its own MAC and VLAN id.
3431 	 * 2. Advanced mode: is for allowing filters to be applied other than
3432 	 *		  its own MAC or VLAN. This mode requires the VF to be
3433 	 *		  Trusted.
3434 	 */
3435 	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3436 		vsi = pf->vsi[vf->lan_vsi_idx];
3437 		f = i40e_find_mac(vsi, data.dst_mac);
3438 
3439 		if (!f) {
3440 			dev_info(&pf->pdev->dev,
3441 				 "Destination MAC %pM doesn't belong to VF %d\n",
3442 				 data.dst_mac, vf->vf_id);
3443 			goto err;
3444 		}
3445 
3446 		if (mask.vlan_id) {
3447 			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3448 					   hlist) {
3449 				if (f->vlan == ntohs(data.vlan_id)) {
3450 					found = true;
3451 					break;
3452 				}
3453 			}
3454 			if (!found) {
3455 				dev_info(&pf->pdev->dev,
3456 					 "VF %d doesn't have any VLAN id %u\n",
3457 					 vf->vf_id, ntohs(data.vlan_id));
3458 				goto err;
3459 			}
3460 		}
3461 	} else {
3462 		/* Check if VF is trusted */
3463 		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3464 			dev_err(&pf->pdev->dev,
3465 				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3466 				vf->vf_id);
3467 			return I40E_ERR_CONFIG;
3468 		}
3469 	}
3470 
3471 	if (mask.dst_mac[0] & data.dst_mac[0]) {
3472 		if (is_broadcast_ether_addr(data.dst_mac) ||
3473 		    is_zero_ether_addr(data.dst_mac)) {
3474 			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3475 				 vf->vf_id, data.dst_mac);
3476 			goto err;
3477 		}
3478 	}
3479 
3480 	if (mask.src_mac[0] & data.src_mac[0]) {
3481 		if (is_broadcast_ether_addr(data.src_mac) ||
3482 		    is_zero_ether_addr(data.src_mac)) {
3483 			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3484 				 vf->vf_id, data.src_mac);
3485 			goto err;
3486 		}
3487 	}
3488 
3489 	if (mask.dst_port & data.dst_port) {
3490 		if (!data.dst_port) {
3491 			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3492 				 vf->vf_id);
3493 			goto err;
3494 		}
3495 	}
3496 
3497 	if (mask.src_port & data.src_port) {
3498 		if (!data.src_port) {
3499 			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3500 				 vf->vf_id);
3501 			goto err;
3502 		}
3503 	}
3504 
3505 	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3506 	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3507 		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3508 			 vf->vf_id);
3509 		goto err;
3510 	}
3511 
3512 	if (mask.vlan_id & data.vlan_id) {
3513 		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3514 			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3515 				 vf->vf_id);
3516 			goto err;
3517 		}
3518 	}
3519 
3520 	return I40E_SUCCESS;
3521 err:
3522 	return I40E_ERR_CONFIG;
3523 }
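
/* A minimal sketch (illustrative only, not part of this driver) of a
 * virtchnl_filter that passes the basic-mode checks above: a non-trusted VF
 * steering IPv4 TCP traffic to its own MAC (vf_mac, hypothetical) on a VLAN
 * it already has a filter for, into TC 1:
 *
 *	struct virtchnl_filter f = {};
 *
 *	f.flow_type = VIRTCHNL_TCP_V4_FLOW;
 *	f.action = VIRTCHNL_ACTION_TC_REDIRECT;		// drop is rejected
 *	f.action_meta = 1;				// target TC
 *	eth_broadcast_addr(f.mask.tcp_spec.dst_mac);	// match the whole MAC
 *	ether_addr_copy(f.data.tcp_spec.dst_mac, vf_mac);
 *	f.mask.tcp_spec.vlan_id = htons(0xFFF);
 *	f.data.tcp_spec.vlan_id = htons(100);
 */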
3524 
3525 /**
3526  * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3527  * @vf: pointer to the VF info
3528  * @seid: seid of the vsi it is searching for
3529  **/
i40e_find_vsi_from_seid(struct i40e_vf * vf,u16 seid)3530 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3531 {
3532 	struct i40e_pf *pf = vf->pf;
3533 	struct i40e_vsi *vsi = NULL;
3534 	int i;
3535 
3536 	for (i = 0; i < vf->num_tc; i++) {
3537 		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3538 		if (vsi && vsi->seid == seid)
3539 			return vsi;
3540 	}
3541 	return NULL;
3542 }
3543 
3544 /**
3545  * i40e_del_all_cloud_filters
3546  * @vf: pointer to the VF info
3547  *
3548  * This function deletes all cloud filters programmed for the given VF
3549  **/
3550 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3551 {
3552 	struct i40e_cloud_filter *cfilter = NULL;
3553 	struct i40e_pf *pf = vf->pf;
3554 	struct i40e_vsi *vsi = NULL;
3555 	struct hlist_node *node;
3556 	int ret;
3557 
3558 	hlist_for_each_entry_safe(cfilter, node,
3559 				  &vf->cloud_filter_list, cloud_node) {
3560 		vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3561 
3562 		if (!vsi) {
3563 			dev_err(&pf->pdev->dev, "VF %d: no VSI found matching seid %u, can't delete cloud filter\n",
3564 				vf->vf_id, cfilter->seid);
3565 			continue;
3566 		}
3567 
3568 		if (cfilter->dst_port)
3569 			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3570 								false);
3571 		else
3572 			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3573 		if (ret)
3574 			dev_err(&pf->pdev->dev,
3575 				"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3576 				vf->vf_id, i40e_stat_str(&pf->hw, ret),
3577 				i40e_aq_str(&pf->hw,
3578 					    pf->hw.aq.asq_last_status));
3579 
3580 		hlist_del(&cfilter->cloud_node);
3581 		kfree(cfilter);
3582 		vf->num_cloud_filters--;
3583 	}
3584 }
3585 
3586 /**
3587  * i40e_vc_del_cloud_filter
3588  * @vf: pointer to the VF info
3589  * @msg: pointer to the msg buffer
3590  *
3591  * This function deletes a cloud filter programmed as a TC filter for ADq
3592  **/
3593 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3594 {
3595 	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3596 	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3597 	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3598 	struct i40e_cloud_filter cfilter, *cf = NULL;
3599 	struct i40e_pf *pf = vf->pf;
3600 	struct i40e_vsi *vsi = NULL;
3601 	struct hlist_node *node;
3602 	i40e_status aq_ret = 0;
3603 	int i, ret;
3604 
3605 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3606 		aq_ret = I40E_ERR_PARAM;
3607 		goto err;
3608 	}
3609 
3610 	if (!vf->adq_enabled) {
3611 		dev_info(&pf->pdev->dev,
3612 			 "VF %d: ADq not enabled, can't apply cloud filter\n",
3613 			 vf->vf_id);
3614 		aq_ret = I40E_ERR_PARAM;
3615 		goto err;
3616 	}
3617 
3618 	if (i40e_validate_cloud_filter(vf, vcf)) {
3619 		dev_info(&pf->pdev->dev,
3620 			 "VF %d: Invalid input, can't apply cloud filter\n",
3621 			 vf->vf_id);
3622 		aq_ret = I40E_ERR_PARAM;
3623 		goto err;
3624 	}
3625 
3626 	memset(&cfilter, 0, sizeof(cfilter));
3627 	/* parse destination mac address */
3628 	for (i = 0; i < ETH_ALEN; i++)
3629 		cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3630 
3631 	/* parse source mac address */
3632 	for (i = 0; i < ETH_ALEN; i++)
3633 		cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3634 
3635 	cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3636 	cfilter.dst_port = mask.dst_port & tcf.dst_port;
3637 	cfilter.src_port = mask.src_port & tcf.src_port;
3638 
3639 	switch (vcf->flow_type) {
3640 	case VIRTCHNL_TCP_V4_FLOW:
3641 		cfilter.n_proto = ETH_P_IP;
3642 		if (mask.dst_ip[0] & tcf.dst_ip[0])
3643 			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3644 			       sizeof(cfilter.ip.v4.dst_ip));
3645 		else if (mask.src_ip[0] & tcf.src_ip[0])
3646 			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3647 			       sizeof(cfilter.ip.v4.src_ip));
3648 		break;
3649 	case VIRTCHNL_TCP_V6_FLOW:
3650 		cfilter.n_proto = ETH_P_IPV6;
3651 		if (mask.dst_ip[3] & tcf.dst_ip[3])
3652 			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3653 			       sizeof(cfilter.ip.v6.dst_ip6));
3654 		if (mask.src_ip[3] & tcf.src_ip[3])
3655 			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3656 			       sizeof(cfilter.ip.v6.src_ip6));
3657 		break;
3658 	default:
3659 		/* TC filter can be configured based on different combinations
3660 		 * of fields; in this case IP is not part of the filter config
3661 		 */
3662 		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3663 			 vf->vf_id);
3664 	}
3665 
3666 	/* get the VSI to which the TC belongs */
3667 	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3668 	cfilter.seid = vsi->seid;
3669 	cfilter.flags = vcf->field_flags;
3670 
3671 	/* Deleting TC filter */
3672 	if (tcf.dst_port)
3673 		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3674 	else
3675 		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3676 	if (ret) {
3677 		dev_err(&pf->pdev->dev,
3678 			"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3679 			vf->vf_id, i40e_stat_str(&pf->hw, ret),
3680 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3681 		goto err;
3682 	}
3683 
3684 	hlist_for_each_entry_safe(cf, node,
3685 				  &vf->cloud_filter_list, cloud_node) {
3686 		if (cf->seid != cfilter.seid)
3687 			continue;
3688 		if (mask.dst_port)
3689 			if (cfilter.dst_port != cf->dst_port)
3690 				continue;
3691 		if (mask.dst_mac[0])
3692 			if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
3693 				continue;
3694 		/* for ipv4, only the first dword of the mask is set */
3695 		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3696 			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3697 				   sizeof(cfilter.ip.v4.dst_ip)))
3698 				continue;
3699 		/* for ipv6, the mask covers all sixteen bytes (4 dwords) */
3700 		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3701 			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3702 				   sizeof(cfilter.ip.v6.dst_ip6)))
3703 				continue;
3704 		if (mask.vlan_id)
3705 			if (cfilter.vlan_id != cf->vlan_id)
3706 				continue;
3707 
3708 		hlist_del(&cf->cloud_node);
3709 		kfree(cf);
3710 		vf->num_cloud_filters--;
3711 	}
3712 
3713 err:
3714 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3715 				       aq_ret);
3716 }
3717 
3718 /**
3719  * i40e_vc_add_cloud_filter
3720  * @vf: pointer to the VF info
3721  * @msg: pointer to the msg buffer
3722  *
3723  * This function adds a cloud filter programmed as a TC filter for ADq
3724  **/
3725 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3726 {
3727 	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3728 	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3729 	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3730 	struct i40e_cloud_filter *cfilter = NULL;
3731 	struct i40e_pf *pf = vf->pf;
3732 	struct i40e_vsi *vsi = NULL;
3733 	i40e_status aq_ret = 0;
3734 	int i, ret;
3735 
3736 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3737 		aq_ret = I40E_ERR_PARAM;
3738 		goto err_out;
3739 	}
3740 
3741 	if (!vf->adq_enabled) {
3742 		dev_info(&pf->pdev->dev,
3743 			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3744 			 vf->vf_id);
3745 		aq_ret = I40E_ERR_PARAM;
3746 		goto err_out;
3747 	}
3748 
3749 	if (i40e_validate_cloud_filter(vf, vcf)) {
3750 		dev_info(&pf->pdev->dev,
3751 			 "VF %d: Invalid input, can't apply cloud filter\n",
3752 			 vf->vf_id);
3753 		aq_ret = I40E_ERR_PARAM;
3754 		goto err_out;
3755 	}
3756 
3757 	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3758 	if (!cfilter)
3759 		return -ENOMEM;
3760 
3761 	/* parse destination mac address */
3762 	for (i = 0; i < ETH_ALEN; i++)
3763 		cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3764 
3765 	/* parse source mac address */
3766 	for (i = 0; i < ETH_ALEN; i++)
3767 		cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3768 
3769 	cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3770 	cfilter->dst_port = mask.dst_port & tcf.dst_port;
3771 	cfilter->src_port = mask.src_port & tcf.src_port;
3772 
3773 	switch (vcf->flow_type) {
3774 	case VIRTCHNL_TCP_V4_FLOW:
3775 		cfilter->n_proto = ETH_P_IP;
3776 		if (mask.dst_ip[0] & tcf.dst_ip[0])
3777 			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3778 			       sizeof(cfilter->ip.v4.dst_ip));
3779 		else if (mask.src_ip[0] & tcf.src_ip[0])
3780 			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3781 			       sizeof(cfilter->ip.v4.src_ip));
3782 		break;
3783 	case VIRTCHNL_TCP_V6_FLOW:
3784 		cfilter->n_proto = ETH_P_IPV6;
3785 		if (mask.dst_ip[3] & tcf.dst_ip[3])
3786 			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3787 			       sizeof(cfilter->ip.v6.dst_ip6));
3788 		if (mask.src_ip[3] & tcf.src_ip[3])
3789 			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3790 			       sizeof(cfilter->ip.v6.src_ip6));
3791 		break;
3792 	default:
3793 		/* TC filter can be configured based on different combinations
3794 		 * of fields; in this case IP is not part of the filter config
3795 		 */
3796 		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3797 			 vf->vf_id);
3798 	}
3799 
3800 	/* get the VSI to which the TC belongs */
3801 	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3802 	cfilter->seid = vsi->seid;
3803 	cfilter->flags = vcf->field_flags;
3804 
3805 	/* Adding cloud filter programmed as TC filter */
3806 	if (tcf.dst_port)
3807 		ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3808 	else
3809 		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3810 	if (ret) {
3811 		dev_err(&pf->pdev->dev,
3812 			"VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3813 			vf->vf_id, i40e_stat_str(&pf->hw, ret),
3814 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3815 		goto err_free;
3816 	}
3817 
3818 	INIT_HLIST_NODE(&cfilter->cloud_node);
3819 	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3820 	/* the list now owns the filter; clear the pointer so err_free won't free it */
3821 	cfilter = NULL;
3822 	vf->num_cloud_filters++;
3823 err_free:
3824 	kfree(cfilter);
3825 err_out:
3826 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3827 				       aq_ret);
3828 }
3829 
3830 /**
3831  * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3832  * @vf: pointer to the VF info
3833  * @msg: pointer to the msg buffer
3834  **/
3835 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3836 {
3837 	struct virtchnl_tc_info *tci =
3838 		(struct virtchnl_tc_info *)msg;
3839 	struct i40e_pf *pf = vf->pf;
3840 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
3841 	int i, adq_request_qps = 0;
3842 	i40e_status aq_ret = 0;
3843 	u64 speed = 0;
3844 
3845 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3846 		aq_ret = I40E_ERR_PARAM;
3847 		goto err;
3848 	}
3849 
3850 	/* ADq cannot be applied if spoof check is ON */
3851 	if (vf->spoofchk) {
3852 		dev_err(&pf->pdev->dev,
3853 			"Spoof check is ON, turn it OFF to enable ADq\n");
3854 		aq_ret = I40E_ERR_PARAM;
3855 		goto err;
3856 	}
3857 
3858 	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3859 		dev_err(&pf->pdev->dev,
3860 			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3861 			vf->vf_id);
3862 		aq_ret = I40E_ERR_PARAM;
3863 		goto err;
3864 	}
3865 
3866 	/* max number of traffic classes for VF currently capped at 4 */
3867 	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3868 		dev_err(&pf->pdev->dev,
3869 			"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3870 			vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3871 		aq_ret = I40E_ERR_PARAM;
3872 		goto err;
3873 	}
3874 
3875 	/* validate queues for each TC */
3876 	for (i = 0; i < tci->num_tc; i++)
3877 		if (!tci->list[i].count ||
3878 		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3879 			dev_err(&pf->pdev->dev,
3880 				"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3881 				vf->vf_id, i, tci->list[i].count,
3882 				I40E_DEFAULT_QUEUES_PER_VF);
3883 			aq_ret = I40E_ERR_PARAM;
3884 			goto err;
3885 		}
3886 
3887 	/* ADq needs the max VF queues, but the VF already has the default number */
3888 	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
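	/* e.g. with I40E_MAX_VF_QUEUES = 16 and I40E_DEFAULT_QUEUES_PER_VF = 4
	 * (the values at the time of writing), 12 extra queue pairs must be
	 * available in the PF's pool
	 */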
3889 
3890 	if (pf->queues_left < adq_request_qps) {
3891 		dev_err(&pf->pdev->dev,
3892 			"No queues left to allocate to VF %d\n",
3893 			vf->vf_id);
3894 		aq_ret = I40E_ERR_PARAM;
3895 		goto err;
3896 	} else {
3897 		/* we need to allocate the max VF queues to enable ADq so that
3898 		 * an ADq-enabled VF always gets its queues back when it goes
3899 		 * through a reset.
3900 		 */
3901 		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3902 	}
3903 
3904 	/* get link speed in Mbps to validate rate limit */
3905 	speed = i40e_vc_link_speed2mbps(ls->link_speed);
3906 	if (speed == SPEED_UNKNOWN) {
3907 		dev_err(&pf->pdev->dev,
3908 			"Cannot detect link speed\n");
3909 		aq_ret = I40E_ERR_PARAM;
3910 		goto err;
3911 	}
3912 
3913 	/* parse data from the queue channel info */
3914 	vf->num_tc = tci->num_tc;
3915 	for (i = 0; i < vf->num_tc; i++) {
3916 		if (tci->list[i].max_tx_rate) {
3917 			if (tci->list[i].max_tx_rate > speed) {
3918 				dev_err(&pf->pdev->dev,
3919 					"Invalid max tx rate %llu specified for VF %d.\n",
3920 					tci->list[i].max_tx_rate,
3921 					vf->vf_id);
3922 				aq_ret = I40E_ERR_PARAM;
3923 				goto err;
3924 			} else {
3925 				vf->ch[i].max_tx_rate =
3926 					tci->list[i].max_tx_rate;
3927 			}
3928 		}
3929 		vf->ch[i].num_qps = tci->list[i].count;
3930 	}
3931 
3932 	/* set this flag only after making sure all inputs are sane */
3933 	vf->adq_enabled = true;
3934 
3935 	/* reset the VF in order to allocate resources */
3936 	i40e_vc_reset_vf(vf, true);
3937 
3938 	return I40E_SUCCESS;
3939 
3940 	/* send the response to the VF */
3941 err:
3942 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3943 				       aq_ret);
3944 }
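
/* A minimal sketch (illustrative only) of the VIRTCHNL_OP_ENABLE_CHANNELS
 * payload parsed above: two TCs of two queue pairs each, the second one
 * rate limited. The buffer must be sized for num_tc list[] entries.
 *
 *	struct virtchnl_tc_info *tci;
 *
 *	tci = kzalloc(sizeof(*tci) + sizeof(tci->list[0]), GFP_KERNEL);
 *	tci->num_tc = 2;
 *	tci->list[0].count = 2;			// queue pairs in TC 0
 *	tci->list[1].count = 2;			// queue pairs in TC 1
 *	tci->list[1].max_tx_rate = 1000;	// Mbps; must not exceed link speed
 */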
3945 
3946 /**
3947  * i40e_vc_del_qch_msg
3948  * @vf: pointer to the VF info
3949  * @msg: pointer to the msg buffer
3950  **/
3951 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3952 {
3953 	struct i40e_pf *pf = vf->pf;
3954 	i40e_status aq_ret = 0;
3955 
3956 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3957 		aq_ret = I40E_ERR_PARAM;
3958 		goto err;
3959 	}
3960 
3961 	if (vf->adq_enabled) {
3962 		i40e_del_all_cloud_filters(vf);
3963 		i40e_del_qch(vf);
3964 		vf->adq_enabled = false;
3965 		vf->num_tc = 0;
3966 		dev_info(&pf->pdev->dev,
3967 			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3968 			 vf->vf_id);
3969 	} else {
3970 		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3971 			 vf->vf_id);
3972 		aq_ret = I40E_ERR_PARAM;
3973 	}
3974 
3975 	/* reset the VF in order to allocate resources */
3976 	i40e_vc_reset_vf(vf, true);
3977 
3978 	return I40E_SUCCESS;
3979 
3980 err:
3981 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3982 				       aq_ret);
3983 }
3984 
3985 /**
3986  * i40e_vc_process_vf_msg
3987  * @pf: pointer to the PF structure
3988  * @vf_id: source VF id
3989  * @v_opcode: operation code
3990  * @v_retval: unused return value code
3991  * @msg: pointer to the msg buffer
3992  * @msglen: msg length
3993  *
3994  * called from the common aeq/arq handler to
3995  * process a request from a VF
3996  **/
3997 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3998 			   u32 __always_unused v_retval, u8 *msg, u16 msglen)
3999 {
4000 	struct i40e_hw *hw = &pf->hw;
4001 	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
4002 	struct i40e_vf *vf;
4003 	int ret;
4004 
4005 	pf->vf_aq_requests++;
4006 	if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
4007 		return -EINVAL;
4008 	vf = &pf->vf[local_vf_id];
4009 
4010 	/* Check if VF is disabled. */
4011 	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
4012 		return I40E_ERR_PARAM;
4013 
4014 	/* perform basic checks on the msg */
4015 	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4016 
4017 	if (ret) {
4018 		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
4019 		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
4020 			local_vf_id, v_opcode, msglen);
4021 		switch (ret) {
4022 		case VIRTCHNL_STATUS_ERR_PARAM:
4023 			return -EPERM;
4024 		default:
4025 			return -EINVAL;
4026 		}
4027 	}
4028 
4029 	switch (v_opcode) {
4030 	case VIRTCHNL_OP_VERSION:
4031 		ret = i40e_vc_get_version_msg(vf, msg);
4032 		break;
4033 	case VIRTCHNL_OP_GET_VF_RESOURCES:
4034 		ret = i40e_vc_get_vf_resources_msg(vf, msg);
4035 		i40e_vc_notify_vf_link_state(vf);
4036 		break;
4037 	case VIRTCHNL_OP_RESET_VF:
4038 		i40e_vc_reset_vf(vf, false);
4039 		ret = 0;
4040 		break;
4041 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4042 		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
4043 		break;
4044 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4045 		ret = i40e_vc_config_queues_msg(vf, msg);
4046 		break;
4047 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4048 		ret = i40e_vc_config_irq_map_msg(vf, msg);
4049 		break;
4050 	case VIRTCHNL_OP_ENABLE_QUEUES:
4051 		ret = i40e_vc_enable_queues_msg(vf, msg);
4052 		i40e_vc_notify_vf_link_state(vf);
4053 		break;
4054 	case VIRTCHNL_OP_DISABLE_QUEUES:
4055 		ret = i40e_vc_disable_queues_msg(vf, msg);
4056 		break;
4057 	case VIRTCHNL_OP_ADD_ETH_ADDR:
4058 		ret = i40e_vc_add_mac_addr_msg(vf, msg);
4059 		break;
4060 	case VIRTCHNL_OP_DEL_ETH_ADDR:
4061 		ret = i40e_vc_del_mac_addr_msg(vf, msg);
4062 		break;
4063 	case VIRTCHNL_OP_ADD_VLAN:
4064 		ret = i40e_vc_add_vlan_msg(vf, msg);
4065 		break;
4066 	case VIRTCHNL_OP_DEL_VLAN:
4067 		ret = i40e_vc_remove_vlan_msg(vf, msg);
4068 		break;
4069 	case VIRTCHNL_OP_GET_STATS:
4070 		ret = i40e_vc_get_stats_msg(vf, msg);
4071 		break;
4072 	case VIRTCHNL_OP_IWARP:
4073 		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
4074 		break;
4075 	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
4076 		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
4077 		break;
4078 	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
4079 		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
4080 		break;
4081 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
4082 		ret = i40e_vc_config_rss_key(vf, msg);
4083 		break;
4084 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
4085 		ret = i40e_vc_config_rss_lut(vf, msg);
4086 		break;
4087 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
4088 		ret = i40e_vc_get_rss_hena(vf, msg);
4089 		break;
4090 	case VIRTCHNL_OP_SET_RSS_HENA:
4091 		ret = i40e_vc_set_rss_hena(vf, msg);
4092 		break;
4093 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4094 		ret = i40e_vc_enable_vlan_stripping(vf, msg);
4095 		break;
4096 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4097 		ret = i40e_vc_disable_vlan_stripping(vf, msg);
4098 		break;
4099 	case VIRTCHNL_OP_REQUEST_QUEUES:
4100 		ret = i40e_vc_request_queues_msg(vf, msg);
4101 		break;
4102 	case VIRTCHNL_OP_ENABLE_CHANNELS:
4103 		ret = i40e_vc_add_qch_msg(vf, msg);
4104 		break;
4105 	case VIRTCHNL_OP_DISABLE_CHANNELS:
4106 		ret = i40e_vc_del_qch_msg(vf, msg);
4107 		break;
4108 	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
4109 		ret = i40e_vc_add_cloud_filter(vf, msg);
4110 		break;
4111 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
4112 		ret = i40e_vc_del_cloud_filter(vf, msg);
4113 		break;
4114 	case VIRTCHNL_OP_UNKNOWN:
4115 	default:
4116 		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
4117 			v_opcode, local_vf_id);
4118 		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
4119 					      I40E_ERR_NOT_IMPLEMENTED);
4120 		break;
4121 	}
4122 
4123 	return ret;
4124 }
4125 
4126 /**
4127  * i40e_vc_process_vflr_event
4128  * @pf: pointer to the PF structure
4129  *
4130  * called from the vflr irq handler to
4131  * free up VF resources and state variables
4132  **/
4133 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
4134 {
4135 	struct i40e_hw *hw = &pf->hw;
4136 	u32 reg, reg_idx, bit_idx;
4137 	struct i40e_vf *vf;
4138 	int vf_id;
4139 
4140 	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
4141 		return 0;
4142 
4143 	/* Re-enable the VFLR interrupt cause here, before looking for which
4144 	 * VF got reset. Otherwise, if another VF gets a reset while the
4145 	 * first one is being processed, that interrupt will be lost, and
4146 	 * that VF will be stuck in reset forever.
4147 	 */
4148 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4149 	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4150 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4151 	i40e_flush(hw);
4152 
4153 	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4154 	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
4155 		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4156 		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
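		/* e.g. absolute VF number 37 (vf_base_id + vf_id) maps to
		 * GLGEN_VFLRSTAT(1) bit 5, since 37 / 32 == 1 and 37 % 32 == 5
		 */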
4157 		/* read GLGEN_VFLRSTAT register to find out which VFs were reset (FLR) */
4158 		vf = &pf->vf[vf_id];
4159 		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4160 		if (reg & BIT(bit_idx))
4161 			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4162 			i40e_reset_vf(vf, true);
4163 	}
4164 
4165 	return 0;
4166 }
4167 
4168 /**
4169  * i40e_validate_vf
4170  * @pf: the physical function
4171  * @vf_id: VF identifier
4172  *
4173  * Check that the VF is enabled and the VSI exists.
4174  *
4175  * Returns 0 on success, negative on failure
4176  **/
4177 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4178 {
4179 	struct i40e_vsi *vsi;
4180 	struct i40e_vf *vf;
4181 	int ret = 0;
4182 
4183 	if (vf_id >= pf->num_alloc_vfs) {
4184 		dev_err(&pf->pdev->dev,
4185 			"Invalid VF Identifier %d\n", vf_id);
4186 		ret = -EINVAL;
4187 		goto err_out;
4188 	}
4189 	vf = &pf->vf[vf_id];
4190 	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4191 	if (!vsi)
4192 		ret = -EINVAL;
4193 err_out:
4194 	return ret;
4195 }
4196 
4197 /**
4198  * i40e_ndo_set_vf_mac
4199  * @netdev: network interface device structure
4200  * @vf_id: VF identifier
4201  * @mac: mac address
4202  *
4203  * program VF mac address
4204  **/
4205 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4206 {
4207 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4208 	struct i40e_vsi *vsi = np->vsi;
4209 	struct i40e_pf *pf = vsi->back;
4210 	struct i40e_mac_filter *f;
4211 	struct i40e_vf *vf;
4212 	int ret = 0;
4213 	struct hlist_node *h;
4214 	int bkt;
4215 	u8 i;
4216 
4217 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4218 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4219 		return -EAGAIN;
4220 	}
4221 
4222 	/* validate the request */
4223 	ret = i40e_validate_vf(pf, vf_id);
4224 	if (ret)
4225 		goto error_param;
4226 
4227 	vf = &pf->vf[vf_id];
4228 
4229 	/* When the VF is resetting wait until it is done.
4230 	 * It can take up to 200 milliseconds,
4231 	 * but wait for up to 300 milliseconds to be safe.
4232 	 * Acquire the VSI pointer only after the VF has been
4233 	 * properly initialized.
4234 	 */
4235 	for (i = 0; i < 15; i++) {
4236 		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4237 			break;
4238 		msleep(20);
4239 	}
4240 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4241 		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4242 			vf_id);
4243 		ret = -EAGAIN;
4244 		goto error_param;
4245 	}
4246 	vsi = pf->vsi[vf->lan_vsi_idx];
4247 
4248 	if (is_multicast_ether_addr(mac)) {
4249 		dev_err(&pf->pdev->dev,
4250 			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4251 		ret = -EINVAL;
4252 		goto error_param;
4253 	}
4254 
4255 	/* Lock once here because the add/del_filter functions invoked below
4256 	 * require mac_filter_hash_lock to be held
4257 	 */
4258 	spin_lock_bh(&vsi->mac_filter_hash_lock);
4259 
4260 	/* delete the temporary mac address */
4261 	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4262 		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4263 
4264 	/* Delete all the filters for this VSI - we're going to kill it
4265 	 * anyway.
4266 	 */
4267 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4268 		__i40e_del_filter(vsi, f);
4269 
4270 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4271 
4272 	/* program mac filter */
4273 	if (i40e_sync_vsi_filters(vsi)) {
4274 		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4275 		ret = -EIO;
4276 		goto error_param;
4277 	}
4278 	ether_addr_copy(vf->default_lan_addr.addr, mac);
4279 
4280 	if (is_zero_ether_addr(mac)) {
4281 		vf->pf_set_mac = false;
4282 		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4283 	} else {
4284 		vf->pf_set_mac = true;
4285 		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4286 			 mac, vf_id);
4287 	}
4288 
4289 	/* Force the VF interface down so it has to come back up with the
4290 	 * new MAC address
4291 	 */
4292 	i40e_vc_reset_vf(vf, true);
4293 	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4294 
4295 error_param:
4296 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4297 	return ret;
4298 }
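
/* This ndo is typically reached through the PF's netdev, e.g. (illustrative,
 * device name hypothetical):
 *
 *	ip link set enp1s0f0 vf 3 mac 02:11:22:33:44:55
 *
 * which arrives here with vf_id == 3 after rtnl parsing.
 */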
4299 
4300 /**
4301  * i40e_ndo_set_vf_port_vlan
4302  * @netdev: network interface device structure
4303  * @vf_id: VF identifier
4304  * @vlan_id: VLAN ID
4305  * @qos: priority setting
4306  * @vlan_proto: vlan protocol
4307  *
4308  * program VF vlan id and/or qos
4309  **/
4310 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4311 			      u16 vlan_id, u8 qos, __be16 vlan_proto)
4312 {
4313 	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
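	/* vlanprio packs the 802.1Q TCI fields, VID in bits 0-11 with the
	 * priority (PCP) above it; e.g. vlan_id 100 (0x064) and qos 5 encode
	 * as 0xA064, assuming I40E_VLAN_PRIORITY_SHIFT is the PCP position (13)
	 */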
4314 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4315 	bool allmulti = false, alluni = false;
4316 	struct i40e_pf *pf = np->vsi->back;
4317 	struct i40e_vsi *vsi;
4318 	struct i40e_vf *vf;
4319 	int ret = 0;
4320 
4321 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4322 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4323 		return -EAGAIN;
4324 	}
4325 
4326 	/* validate the request */
4327 	ret = i40e_validate_vf(pf, vf_id);
4328 	if (ret)
4329 		goto error_pvid;
4330 
4331 	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4332 		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4333 		ret = -EINVAL;
4334 		goto error_pvid;
4335 	}
4336 
4337 	if (vlan_proto != htons(ETH_P_8021Q)) {
4338 		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4339 		ret = -EPROTONOSUPPORT;
4340 		goto error_pvid;
4341 	}
4342 
4343 	vf = &pf->vf[vf_id];
4344 	vsi = pf->vsi[vf->lan_vsi_idx];
4345 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4346 		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4347 			vf_id);
4348 		ret = -EAGAIN;
4349 		goto error_pvid;
4350 	}
4351 
4352 	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4353 		/* duplicate request, so just return success */
4354 		goto error_pvid;
4355 
4356 	i40e_vc_reset_vf(vf, true);
4357 	/* During reset the VF got a new VSI, so refresh a pointer. */
4358 	vsi = pf->vsi[vf->lan_vsi_idx];
4359 	/* Locked once because multiple functions below iterate list */
4360 	spin_lock_bh(&vsi->mac_filter_hash_lock);
4361 
4362 	/* Check for condition where there was already a port VLAN ID
4363 	 * filter set and now it is being deleted by setting it to zero.
4364 	 * Additionally check for the condition where there was a port
4365 	 * VLAN but now there is a new and different port VLAN being set.
4366 	 * Before deleting all the old VLAN filters we must add new ones
4367 	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
4368 	 * MAC addresses deleted.
4369 	 */
4370 	if ((!(vlan_id || qos) ||
4371 	    vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4372 	    vsi->info.pvid) {
4373 		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4374 		if (ret) {
4375 			dev_info(&vsi->back->pdev->dev,
4376 				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4377 				 vsi->back->hw.aq.asq_last_status);
4378 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4379 			goto error_pvid;
4380 		}
4381 	}
4382 
4383 	if (vsi->info.pvid) {
4384 		/* remove all filters on the old VLAN */
4385 		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4386 					   VLAN_VID_MASK));
4387 	}
4388 
4389 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4390 
4391 	/* disable promisc modes in case they were enabled */
4392 	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4393 					      allmulti, alluni);
4394 	if (ret) {
4395 		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4396 		goto error_pvid;
4397 	}
4398 
4399 	if (vlan_id || qos)
4400 		ret = i40e_vsi_add_pvid(vsi, vlanprio);
4401 	else
4402 		i40e_vsi_remove_pvid(vsi);
4403 	spin_lock_bh(&vsi->mac_filter_hash_lock);
4404 
4405 	if (vlan_id) {
4406 		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4407 			 vlan_id, qos, vf_id);
4408 
4409 		/* add new VLAN filter for each MAC */
4410 		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4411 		if (ret) {
4412 			dev_info(&vsi->back->pdev->dev,
4413 				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4414 				 vsi->back->hw.aq.asq_last_status);
4415 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4416 			goto error_pvid;
4417 		}
4418 
4419 		/* remove the previously added non-VLAN MAC filters */
4420 		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4421 	}
4422 
4423 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4424 
4425 	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4426 		alluni = true;
4427 
4428 	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4429 		allmulti = true;
4430 
4431 	/* Schedule the worker thread to take care of applying changes */
4432 	i40e_service_event_schedule(vsi->back);
4433 
4434 	if (ret) {
4435 		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4436 		goto error_pvid;
4437 	}
4438 
4439 	/* The Port VLAN needs to be saved across resets the same as the
4440 	 * default LAN MAC address.
4441 	 */
4442 	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4443 
4444 	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4445 	if (ret) {
4446 		dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4447 		goto error_pvid;
4448 	}
4449 
4450 	ret = 0;
4451 
4452 error_pvid:
4453 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4454 	return ret;
4455 }
4456 
4457 /**
4458  * i40e_ndo_set_vf_bw
4459  * @netdev: network interface device structure
4460  * @vf_id: VF identifier
4461  * @min_tx_rate: Minimum Tx rate
4462  * @max_tx_rate: Maximum Tx rate
4463  *
4464  * configure VF Tx rate
4465  **/
4466 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4467 		       int max_tx_rate)
4468 {
4469 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4470 	struct i40e_pf *pf = np->vsi->back;
4471 	struct i40e_vsi *vsi;
4472 	struct i40e_vf *vf;
4473 	int ret = 0;
4474 
4475 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4476 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4477 		return -EAGAIN;
4478 	}
4479 
4480 	/* validate the request */
4481 	ret = i40e_validate_vf(pf, vf_id);
4482 	if (ret)
4483 		goto error;
4484 
4485 	if (min_tx_rate) {
4486 		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4487 			min_tx_rate, vf_id);
4488 		ret = -EINVAL;
4489 		goto error;
4490 	}
4491 
4492 	vf = &pf->vf[vf_id];
4493 	vsi = pf->vsi[vf->lan_vsi_idx];
4494 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4495 		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4496 			vf_id);
4497 		ret = -EAGAIN;
4498 		goto error;
4499 	}
4500 
4501 	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4502 	if (ret)
4503 		goto error;
4504 
4505 	vf->tx_rate = max_tx_rate;
4506 error:
4507 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4508 	return ret;
4509 }
4510 
4511 /**
4512  * i40e_ndo_get_vf_config
4513  * @netdev: network interface device structure
4514  * @vf_id: VF identifier
4515  * @ivi: VF configuration structure
4516  *
4517  * return VF configuration
4518  **/
4519 int i40e_ndo_get_vf_config(struct net_device *netdev,
4520 			   int vf_id, struct ifla_vf_info *ivi)
4521 {
4522 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4523 	struct i40e_vsi *vsi = np->vsi;
4524 	struct i40e_pf *pf = vsi->back;
4525 	struct i40e_vf *vf;
4526 	int ret = 0;
4527 
4528 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4529 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4530 		return -EAGAIN;
4531 	}
4532 
4533 	/* validate the request */
4534 	ret = i40e_validate_vf(pf, vf_id);
4535 	if (ret)
4536 		goto error_param;
4537 
4538 	vf = &pf->vf[vf_id];
4539 	/* first vsi is always the LAN vsi */
4540 	vsi = pf->vsi[vf->lan_vsi_idx];
4541 	if (!vsi) {
4542 		ret = -ENOENT;
4543 		goto error_param;
4544 	}
4545 
4546 	ivi->vf = vf_id;
4547 
4548 	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4549 
4550 	ivi->max_tx_rate = vf->tx_rate;
4551 	ivi->min_tx_rate = 0;
4552 	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4553 	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4554 		   I40E_VLAN_PRIORITY_SHIFT;
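	/* inverse of the encoding in i40e_ndo_set_vf_port_vlan(); e.g. a
	 * stored pvid of 0xA064 reads back as vlan 100, qos 5
	 */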
4555 	if (!vf->link_forced)
4556 		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4557 	else if (vf->link_up)
4558 		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4559 	else
4560 		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4561 	ivi->spoofchk = vf->spoofchk;
4562 	ivi->trusted = vf->trusted;
4563 	ret = 0;
4564 
4565 error_param:
4566 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4567 	return ret;
4568 }
4569 
4570 /**
4571  * i40e_ndo_set_vf_link_state
4572  * @netdev: network interface device structure
4573  * @vf_id: VF identifier
4574  * @link: required link state
4575  *
4576  * Set the link state of a specified VF, regardless of physical link state
4577  **/
4578 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4579 {
4580 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4581 	struct i40e_pf *pf = np->vsi->back;
4582 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
4583 	struct virtchnl_pf_event pfe;
4584 	struct i40e_hw *hw = &pf->hw;
4585 	struct i40e_vf *vf;
4586 	int abs_vf_id;
4587 	int ret = 0;
4588 
4589 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4590 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4591 		return -EAGAIN;
4592 	}
4593 
4594 	/* validate the request */
4595 	if (vf_id >= pf->num_alloc_vfs) {
4596 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4597 		ret = -EINVAL;
4598 		goto error_out;
4599 	}
4600 
4601 	vf = &pf->vf[vf_id];
4602 	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4603 
4604 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4605 	pfe.severity = PF_EVENT_SEVERITY_INFO;
4606 
4607 	switch (link) {
4608 	case IFLA_VF_LINK_STATE_AUTO:
4609 		vf->link_forced = false;
4610 		i40e_set_vf_link_state(vf, &pfe, ls);
4611 		break;
4612 	case IFLA_VF_LINK_STATE_ENABLE:
4613 		vf->link_forced = true;
4614 		vf->link_up = true;
4615 		i40e_set_vf_link_state(vf, &pfe, ls);
4616 		break;
4617 	case IFLA_VF_LINK_STATE_DISABLE:
4618 		vf->link_forced = true;
4619 		vf->link_up = false;
4620 		i40e_set_vf_link_state(vf, &pfe, ls);
4621 		break;
4622 	default:
4623 		ret = -EINVAL;
4624 		goto error_out;
4625 	}
4626 	/* Notify the VF of its new link state */
4627 	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4628 			       0, (u8 *)&pfe, sizeof(pfe), NULL);
4629 
4630 error_out:
4631 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4632 	return ret;
4633 }
4634 
4635 /**
4636  * i40e_ndo_set_vf_spoofchk
4637  * @netdev: network interface device structure
4638  * @vf_id: VF identifier
4639  * @enable: flag to enable or disable feature
4640  *
4641  * Enable or disable VF spoof checking
4642  **/
4643 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4644 {
4645 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4646 	struct i40e_vsi *vsi = np->vsi;
4647 	struct i40e_pf *pf = vsi->back;
4648 	struct i40e_vsi_context ctxt;
4649 	struct i40e_hw *hw = &pf->hw;
4650 	struct i40e_vf *vf;
4651 	int ret = 0;
4652 
4653 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4654 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4655 		return -EAGAIN;
4656 	}
4657 
4658 	/* validate the request */
4659 	if (vf_id >= pf->num_alloc_vfs) {
4660 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4661 		ret = -EINVAL;
4662 		goto out;
4663 	}
4664 
4665 	vf = &pf->vf[vf_id];
4666 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4667 		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4668 			vf_id);
4669 		ret = -EAGAIN;
4670 		goto out;
4671 	}
4672 
4673 	if (enable == vf->spoofchk)
4674 		goto out;
4675 
4676 	vf->spoofchk = enable;
4677 	memset(&ctxt, 0, sizeof(ctxt));
4678 	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4679 	ctxt.pf_num = pf->hw.pf_id;
4680 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4681 	if (enable)
4682 		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4683 					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
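	/* when enable is false, sec_flags stays zeroed, so the update below
	 * (with the security section marked valid) clears both checks
	 */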
4684 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4685 	if (ret) {
4686 		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4687 			ret);
4688 		ret = -EIO;
4689 	}
4690 out:
4691 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4692 	return ret;
4693 }
4694 
4695 /**
4696  * i40e_ndo_set_vf_trust
4697  * @netdev: network interface device structure of the pf
4698  * @vf_id: VF identifier
4699  * @setting: trust setting
4700  *
4701  * Enable or disable VF trust setting
4702  **/
4703 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4704 {
4705 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4706 	struct i40e_pf *pf = np->vsi->back;
4707 	struct i40e_vf *vf;
4708 	int ret = 0;
4709 
4710 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4711 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4712 		return -EAGAIN;
4713 	}
4714 
4715 	/* validate the request */
4716 	if (vf_id >= pf->num_alloc_vfs) {
4717 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4718 		ret = -EINVAL;
4719 		goto out;
4720 	}
4721 
4722 	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4723 		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4724 		ret = -EINVAL;
4725 		goto out;
4726 	}
4727 
4728 	vf = &pf->vf[vf_id];
4729 
4730 	if (setting == vf->trusted)
4731 		goto out;
4732 
4733 	vf->trusted = setting;
4734 	i40e_vc_reset_vf(vf, true);
4735 	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4736 		 vf_id, setting ? "" : "un");
4737 
4738 	if (vf->adq_enabled) {
4739 		if (!vf->trusted) {
4740 			dev_info(&pf->pdev->dev,
4741 				 "VF %u no longer Trusted, deleting all cloud filters\n",
4742 				 vf_id);
4743 			i40e_del_all_cloud_filters(vf);
4744 		}
4745 	}
4746 
4747 out:
4748 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4749 	return ret;
4750 }
4751 
4752 /**
4753  * i40e_get_vf_stats - populate some stats for the VF
4754  * @netdev: the netdev of the PF
4755  * @vf_id: the host OS identifier (0-127)
4756  * @vf_stats: pointer to the OS memory to be initialized
4757  */
4758 int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4759 		      struct ifla_vf_stats *vf_stats)
4760 {
4761 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4762 	struct i40e_pf *pf = np->vsi->back;
4763 	struct i40e_eth_stats *stats;
4764 	struct i40e_vsi *vsi;
4765 	struct i40e_vf *vf;
4766 
4767 	/* validate the request */
4768 	if (i40e_validate_vf(pf, vf_id))
4769 		return -EINVAL;
4770 
4771 	vf = &pf->vf[vf_id];
4772 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4773 		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4774 		return -EBUSY;
4775 	}
4776 
4777 	vsi = pf->vsi[vf->lan_vsi_idx];
4778 	if (!vsi)
4779 		return -EINVAL;
4780 
4781 	i40e_update_eth_stats(vsi);
4782 	stats = &vsi->eth_stats;
4783 
4784 	memset(vf_stats, 0, sizeof(*vf_stats));
4785 
4786 	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4787 		stats->rx_multicast;
4788 	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4789 		stats->tx_multicast;
4790 	vf_stats->rx_bytes   = stats->rx_bytes;
4791 	vf_stats->tx_bytes   = stats->tx_bytes;
4792 	vf_stats->broadcast  = stats->rx_broadcast;
4793 	vf_stats->multicast  = stats->rx_multicast;
4794 	vf_stats->rx_dropped = stats->rx_discards;
4795 	vf_stats->tx_dropped = stats->tx_discards;
4796 
4797 	return 0;
4798 }
4799