1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "ice_vf_lib_private.h"
5 #include "ice.h"
6 #include "ice_lib.h"
7 #include "ice_fltr.h"
8 #include "ice_virtchnl_allowlist.h"
9 
10 /* Public functions which may be accessed by all driver files */
11 
12 /**
13  * ice_get_vf_by_id - Get pointer to VF by ID
14  * @pf: the PF private structure
15  * @vf_id: the VF ID to locate
16  *
17  * Locate and return a pointer to the VF structure associated with a given ID.
18  * Returns NULL if the ID does not have a valid VF structure associated with
19  * it.
20  *
21  * This function takes a reference to the VF, which must be released by
22  * calling ice_put_vf() once the caller is finished accessing the VF structure
23  * returned.
24  */
ice_get_vf_by_id(struct ice_pf * pf,u16 vf_id)25 struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
26 {
27 	struct ice_vf *vf;
28 
29 	rcu_read_lock();
30 	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
31 		if (vf->vf_id == vf_id) {
32 			struct ice_vf *found;
33 
34 			if (kref_get_unless_zero(&vf->refcnt))
35 				found = vf;
36 			else
37 				found = NULL;
38 
39 			rcu_read_unlock();
40 			return found;
41 		}
42 	}
43 	rcu_read_unlock();
44 
45 	return NULL;
46 }
47 
48 /**
49  * ice_release_vf - Release VF associated with a refcount
50  * @ref: the kref decremented to zero
51  *
52  * Callback function for kref_put to release a VF once its reference count has
53  * hit zero.
54  */
ice_release_vf(struct kref * ref)55 static void ice_release_vf(struct kref *ref)
56 {
57 	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
58 
59 	vf->vf_ops->free(vf);
60 }
61 
62 /**
63  * ice_put_vf - Release a reference to a VF
64  * @vf: the VF structure to decrease reference count on
65  *
66  * Decrease the reference count for a VF, and free the entry if it is no
67  * longer in use.
68  *
69  * This must be called after ice_get_vf_by_id() once the reference to the VF
70  * structure is no longer used. Otherwise, the VF structure will never be
71  * freed.
72  */
ice_put_vf(struct ice_vf * vf)73 void ice_put_vf(struct ice_vf *vf)
74 {
75 	kref_put(&vf->refcnt, ice_release_vf);
76 }
77 
78 /**
79  * ice_has_vfs - Return true if the PF has any associated VFs
80  * @pf: the PF private structure
81  *
82  * Return whether or not the PF has any allocated VFs.
83  *
84  * Note that this function only guarantees that there are no VFs at the point
85  * of calling it. It does not guarantee that no more VFs will be added.
86  */
ice_has_vfs(struct ice_pf * pf)87 bool ice_has_vfs(struct ice_pf *pf)
88 {
89 	/* A simple check that the hash table is not empty does not require
90 	 * the mutex or rcu_read_lock.
91 	 */
92 	return !hash_empty(pf->vfs.table);
93 }
94 
95 /**
96  * ice_get_num_vfs - Get number of allocated VFs
97  * @pf: the PF private structure
98  *
99  * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
100  * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
101  * the output of this function.
102  */
ice_get_num_vfs(struct ice_pf * pf)103 u16 ice_get_num_vfs(struct ice_pf *pf)
104 {
105 	struct ice_vf *vf;
106 	unsigned int bkt;
107 	u16 num_vfs = 0;
108 
109 	rcu_read_lock();
110 	ice_for_each_vf_rcu(pf, bkt, vf)
111 		num_vfs++;
112 	rcu_read_unlock();
113 
114 	return num_vfs;
115 }
116 
117 /**
118  * ice_get_vf_vsi - get VF's VSI based on the stored index
119  * @vf: VF used to get VSI
120  */
ice_get_vf_vsi(struct ice_vf * vf)121 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
122 {
123 	if (vf->lan_vsi_idx == ICE_NO_VSI)
124 		return NULL;
125 
126 	return vf->pf->vsi[vf->lan_vsi_idx];
127 }
128 
129 /**
130  * ice_is_vf_disabled
131  * @vf: pointer to the VF info
132  *
133  * If the PF has been disabled, there is no need resetting VF until PF is
134  * active again. Similarly, if the VF has been disabled, this means something
135  * else is resetting the VF, so we shouldn't continue.
136  *
137  * Returns true if the caller should consider the VF as disabled whether
138  * because that single VF is explicitly disabled or because the PF is
139  * currently disabled.
140  */
ice_is_vf_disabled(struct ice_vf * vf)141 bool ice_is_vf_disabled(struct ice_vf *vf)
142 {
143 	struct ice_pf *pf = vf->pf;
144 
145 	return (test_bit(ICE_VF_DIS, pf->state) ||
146 		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
147 }
148 
149 /**
150  * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
151  * @vf: The VF being resseting
152  *
153  * The max poll time is about ~800ms, which is about the maximum time it takes
154  * for a VF to be reset and/or a VF driver to be removed.
155  */
ice_wait_on_vf_reset(struct ice_vf * vf)156 static void ice_wait_on_vf_reset(struct ice_vf *vf)
157 {
158 	int i;
159 
160 	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
161 		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
162 			break;
163 		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
164 	}
165 }
166 
167 /**
168  * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
169  * @vf: VF to check if it's ready to be configured/queried
170  *
171  * The purpose of this function is to make sure the VF is not in reset, not
172  * disabled, and initialized so it can be configured and/or queried by a host
173  * administrator.
174  */
ice_check_vf_ready_for_cfg(struct ice_vf * vf)175 int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
176 {
177 	ice_wait_on_vf_reset(vf);
178 
179 	if (ice_is_vf_disabled(vf))
180 		return -EINVAL;
181 
182 	if (ice_check_vf_init(vf))
183 		return -EBUSY;
184 
185 	return 0;
186 }
187 
188 /**
189  * ice_trigger_vf_reset - Reset a VF on HW
190  * @vf: pointer to the VF structure
191  * @is_vflr: true if VFLR was issued, false if not
192  * @is_pfr: true if the reset was triggered due to a previous PFR
193  *
194  * Trigger hardware to start a reset for a particular VF. Expects the caller
195  * to wait the proper amount of time to allow hardware to reset the VF before
196  * it cleans up and restores VF functionality.
197  */
ice_trigger_vf_reset(struct ice_vf * vf,bool is_vflr,bool is_pfr)198 static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
199 {
200 	/* Inform VF that it is no longer active, as a warning */
201 	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
202 
203 	/* Disable VF's configuration API during reset. The flag is re-enabled
204 	 * when it's safe again to access VF's VSI.
205 	 */
206 	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
207 
208 	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
209 	 * needs to clear them in the case of VFR/VFLR. If this is done for
210 	 * PFR, it can mess up VF resets because the VF driver may already
211 	 * have started cleanup by the time we get here.
212 	 */
213 	if (!is_pfr)
214 		vf->vf_ops->clear_mbx_register(vf);
215 
216 	vf->vf_ops->trigger_reset_register(vf, is_vflr);
217 }
218 
/**
 * ice_vf_clear_counters - reset the software counters kept for a VF
 * @vf: VF whose counters are being cleared
 *
 * Zero the VLAN count on the VF's VSI, the VF's MAC filter count, and its
 * MDD Tx/Rx event statistics. Used as part of the VF reset flow (see
 * ice_vf_pre_vsi_rebuild()).
 */
static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	/* The VSI may be absent (index invalidated); skip it in that case */
	if (vsi)
		vsi->num_vlan = 0;

	vf->num_mac = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
230 
231 /**
232  * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
233  * @vf: VF to perform pre VSI rebuild tasks
234  *
235  * These tasks are items that don't need to be amortized since they are most
236  * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
237  */
ice_vf_pre_vsi_rebuild(struct ice_vf * vf)238 static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
239 {
240 	ice_vf_clear_counters(vf);
241 	vf->vf_ops->clear_reset_trigger(vf);
242 }
243 
244 /**
245  * ice_vf_rebuild_vsi - rebuild the VF's VSI
246  * @vf: VF to rebuild the VSI for
247  *
248  * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
249  * host, PFR, CORER, etc.).
250  */
ice_vf_rebuild_vsi(struct ice_vf * vf)251 static int ice_vf_rebuild_vsi(struct ice_vf *vf)
252 {
253 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
254 	struct ice_pf *pf = vf->pf;
255 
256 	if (WARN_ON(!vsi))
257 		return -EINVAL;
258 
259 	if (ice_vsi_rebuild(vsi, true)) {
260 		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
261 			vf->vf_id);
262 		return -EIO;
263 	}
264 	/* vsi->idx will remain the same in this case so don't update
265 	 * vf->lan_vsi_idx
266 	 */
267 	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
268 	vf->lan_vsi_num = vsi->vsi_num;
269 
270 	return 0;
271 }
272 
273 /**
274  * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
275  * are in unicast promiscuous mode
276  * @pf: PF structure for accessing VF(s)
277  *
278  * Return false if no VF(s) are in unicast promiscuous mode,
279  * else return true
280  */
ice_is_any_vf_in_unicast_promisc(struct ice_pf * pf)281 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
282 {
283 	bool is_vf_promisc = false;
284 	struct ice_vf *vf;
285 	unsigned int bkt;
286 
287 	rcu_read_lock();
288 	ice_for_each_vf_rcu(pf, bkt, vf) {
289 		/* found a VF that has promiscuous mode configured */
290 		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
291 			is_vf_promisc = true;
292 			break;
293 		}
294 	}
295 	rcu_read_unlock();
296 
297 	return is_vf_promisc;
298 }
299 
300 /**
301  * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
302  * @vf: the VF pointer
303  * @vsi: the VSI to configure
304  * @ucast_m: promiscuous mask to apply to unicast
305  * @mcast_m: promiscuous mask to apply to multicast
306  *
307  * Decide which mask should be used for unicast and multicast filter,
308  * based on presence of VLANs
309  */
310 void
ice_vf_get_promisc_masks(struct ice_vf * vf,struct ice_vsi * vsi,u8 * ucast_m,u8 * mcast_m)311 ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
312 			 u8 *ucast_m, u8 *mcast_m)
313 {
314 	if (ice_vf_is_port_vlan_ena(vf) ||
315 	    ice_vsi_has_non_zero_vlans(vsi)) {
316 		*mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
317 		*ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
318 	} else {
319 		*mcast_m = ICE_MCAST_PROMISC_BITS;
320 		*ucast_m = ICE_UCAST_PROMISC_BITS;
321 	}
322 }
323 
324 /**
325  * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
326  * @vf: the VF pointer
327  * @vsi: the VSI to configure
328  *
329  * Clear all promiscuous/allmulticast filters for a VF
330  */
331 static int
ice_vf_clear_all_promisc_modes(struct ice_vf * vf,struct ice_vsi * vsi)332 ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
333 {
334 	struct ice_pf *pf = vf->pf;
335 	u8 ucast_m, mcast_m;
336 	int ret = 0;
337 
338 	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
339 	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
340 		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
341 			if (ice_is_dflt_vsi_in_use(vsi->port_info))
342 				ret = ice_clear_dflt_vsi(vsi);
343 		} else {
344 			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
345 		}
346 
347 		if (ret) {
348 			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
349 		} else {
350 			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
351 			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
352 		}
353 	}
354 
355 	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
356 		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
357 		if (ret) {
358 			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
359 		} else {
360 			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
361 			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
362 		}
363 	}
364 	return ret;
365 }
366 
367 /**
368  * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
369  * @vf: the VF to configure
370  * @vsi: the VF's VSI
371  * @promisc_m: the promiscuous mode to enable
372  */
373 int
ice_vf_set_vsi_promisc(struct ice_vf * vf,struct ice_vsi * vsi,u8 promisc_m)374 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
375 {
376 	struct ice_hw *hw = &vsi->back->hw;
377 	int status;
378 
379 	if (ice_vf_is_port_vlan_ena(vf))
380 		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
381 						  ice_vf_get_port_vlan_id(vf));
382 	else if (ice_vsi_has_non_zero_vlans(vsi))
383 		status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
384 	else
385 		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
386 
387 	if (status && status != -EEXIST) {
388 		dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
389 			vf->vf_id, status);
390 		return status;
391 	}
392 
393 	return 0;
394 }
395 
396 /**
397  * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
398  * @vf: the VF to configure
399  * @vsi: the VF's VSI
400  * @promisc_m: the promiscuous mode to disable
401  */
402 int
ice_vf_clear_vsi_promisc(struct ice_vf * vf,struct ice_vsi * vsi,u8 promisc_m)403 ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
404 {
405 	struct ice_hw *hw = &vsi->back->hw;
406 	int status;
407 
408 	if (ice_vf_is_port_vlan_ena(vf))
409 		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
410 						    ice_vf_get_port_vlan_id(vf));
411 	else if (ice_vsi_has_non_zero_vlans(vsi))
412 		status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
413 	else
414 		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
415 
416 	if (status && status != -ENOENT) {
417 		dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
418 			vf->vf_id, status);
419 		return status;
420 	}
421 
422 	return 0;
423 }
424 
425 /**
426  * ice_reset_all_vfs - reset all allocated VFs in one go
427  * @pf: pointer to the PF structure
428  *
429  * Reset all VFs at once, in response to a PF or other device reset.
430  *
431  * First, tell the hardware to reset each VF, then do all the waiting in one
432  * chunk, and finally finish restoring each VF after the wait. This is useful
433  * during PF routines which need to reset all VFs, as otherwise it must perform
434  * these resets in a serialized fashion.
435  */
ice_reset_all_vfs(struct ice_pf * pf)436 void ice_reset_all_vfs(struct ice_pf *pf)
437 {
438 	struct device *dev = ice_pf_to_dev(pf);
439 	struct ice_hw *hw = &pf->hw;
440 	struct ice_vf *vf;
441 	unsigned int bkt;
442 
443 	/* If we don't have any VFs, then there is nothing to reset */
444 	if (!ice_has_vfs(pf))
445 		return;
446 
447 	mutex_lock(&pf->vfs.table_lock);
448 
449 	/* clear all malicious info if the VFs are getting reset */
450 	ice_for_each_vf(pf, bkt, vf)
451 		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
452 					ICE_MAX_SRIOV_VFS, vf->vf_id))
453 			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
454 				vf->vf_id);
455 
456 	/* If VFs have been disabled, there is no need to reset */
457 	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
458 		mutex_unlock(&pf->vfs.table_lock);
459 		return;
460 	}
461 
462 	/* Begin reset on all VFs at once */
463 	ice_for_each_vf(pf, bkt, vf)
464 		ice_trigger_vf_reset(vf, true, true);
465 
466 	/* HW requires some time to make sure it can flush the FIFO for a VF
467 	 * when it resets it. Now that we've triggered all of the VFs, iterate
468 	 * the table again and wait for each VF to complete.
469 	 */
470 	ice_for_each_vf(pf, bkt, vf) {
471 		if (!vf->vf_ops->poll_reset_status(vf)) {
472 			/* Display a warning if at least one VF didn't manage
473 			 * to reset in time, but continue on with the
474 			 * operation.
475 			 */
476 			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
477 			break;
478 		}
479 	}
480 
481 	/* free VF resources to begin resetting the VSI state */
482 	ice_for_each_vf(pf, bkt, vf) {
483 		mutex_lock(&vf->cfg_lock);
484 
485 		vf->driver_caps = 0;
486 		ice_vc_set_default_allowlist(vf);
487 
488 		ice_vf_fdir_exit(vf);
489 		ice_vf_fdir_init(vf);
490 		/* clean VF control VSI when resetting VFs since it should be
491 		 * setup only when VF creates its first FDIR rule.
492 		 */
493 		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
494 			ice_vf_ctrl_invalidate_vsi(vf);
495 
496 		ice_vf_pre_vsi_rebuild(vf);
497 		ice_vf_rebuild_vsi(vf);
498 		vf->vf_ops->post_vsi_rebuild(vf);
499 
500 		mutex_unlock(&vf->cfg_lock);
501 	}
502 
503 	if (ice_is_eswitch_mode_switchdev(pf))
504 		if (ice_eswitch_rebuild(pf))
505 			dev_warn(dev, "eswitch rebuild failed\n");
506 
507 	ice_flush(hw);
508 	clear_bit(ICE_VF_DIS, pf->state);
509 
510 	mutex_unlock(&pf->vfs.table_lock);
511 }
512 
513 /**
514  * ice_notify_vf_reset - Notify VF of a reset event
515  * @vf: pointer to the VF structure
516  */
ice_notify_vf_reset(struct ice_vf * vf)517 static void ice_notify_vf_reset(struct ice_vf *vf)
518 {
519 	struct ice_hw *hw = &vf->pf->hw;
520 	struct virtchnl_pf_event pfe;
521 
522 	/* Bail out if VF is in disabled state, neither initialized, nor active
523 	 * state - otherwise proceed with notifications
524 	 */
525 	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
526 	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
527 	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
528 		return;
529 
530 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
531 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
532 	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
533 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
534 			      NULL);
535 }
536 
537 /**
538  * ice_reset_vf - Reset a particular VF
539  * @vf: pointer to the VF structure
540  * @flags: flags controlling behavior of the reset
541  *
542  * Flags:
543  *   ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
544  *   ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
545  *   ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
546  *
547  * Returns 0 if the VF is currently in reset, if resets are disabled, or if
548  * the VF resets successfully. Returns an error code if the VF fails to
549  * rebuild.
550  */
ice_reset_vf(struct ice_vf * vf,u32 flags)551 int ice_reset_vf(struct ice_vf *vf, u32 flags)
552 {
553 	struct ice_pf *pf = vf->pf;
554 	struct ice_vsi *vsi;
555 	struct device *dev;
556 	struct ice_hw *hw;
557 	int err = 0;
558 	bool rsd;
559 
560 	dev = ice_pf_to_dev(pf);
561 	hw = &pf->hw;
562 
563 	if (flags & ICE_VF_RESET_NOTIFY)
564 		ice_notify_vf_reset(vf);
565 
566 	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
567 		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
568 			vf->vf_id);
569 		return 0;
570 	}
571 
572 	if (ice_is_vf_disabled(vf)) {
573 		vsi = ice_get_vf_vsi(vf);
574 		if (!vsi) {
575 			dev_dbg(dev, "VF is already removed\n");
576 			return -EINVAL;
577 		}
578 		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
579 
580 		if (ice_vsi_is_rx_queue_active(vsi))
581 			ice_vsi_stop_all_rx_rings(vsi);
582 
583 		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
584 			vf->vf_id);
585 		return 0;
586 	}
587 
588 	if (flags & ICE_VF_RESET_LOCK)
589 		mutex_lock(&vf->cfg_lock);
590 	else
591 		lockdep_assert_held(&vf->cfg_lock);
592 
593 	/* Set VF disable bit state here, before triggering reset */
594 	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
595 	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);
596 
597 	vsi = ice_get_vf_vsi(vf);
598 	if (WARN_ON(!vsi)) {
599 		err = -EIO;
600 		goto out_unlock;
601 	}
602 
603 	ice_dis_vf_qs(vf);
604 
605 	/* Call Disable LAN Tx queue AQ whether or not queues are
606 	 * enabled. This is needed for successful completion of VFR.
607 	 */
608 	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
609 			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);
610 
611 	/* poll VPGEN_VFRSTAT reg to make sure
612 	 * that reset is complete
613 	 */
614 	rsd = vf->vf_ops->poll_reset_status(vf);
615 
616 	/* Display a warning if VF didn't manage to reset in time, but need to
617 	 * continue on with the operation.
618 	 */
619 	if (!rsd)
620 		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
621 
622 	vf->driver_caps = 0;
623 	ice_vc_set_default_allowlist(vf);
624 
625 	/* disable promiscuous modes in case they were enabled
626 	 * ignore any error if disabling process failed
627 	 */
628 	ice_vf_clear_all_promisc_modes(vf, vsi);
629 
630 	ice_eswitch_del_vf_mac_rule(vf);
631 
632 	ice_vf_fdir_exit(vf);
633 	ice_vf_fdir_init(vf);
634 	/* clean VF control VSI when resetting VF since it should be setup
635 	 * only when VF creates its first FDIR rule.
636 	 */
637 	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
638 		ice_vf_ctrl_vsi_release(vf);
639 
640 	ice_vf_pre_vsi_rebuild(vf);
641 
642 	if (vf->vf_ops->vsi_rebuild(vf)) {
643 		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
644 			vf->vf_id);
645 		err = -EFAULT;
646 		goto out_unlock;
647 	}
648 
649 	vf->vf_ops->post_vsi_rebuild(vf);
650 	vsi = ice_get_vf_vsi(vf);
651 	if (WARN_ON(!vsi)) {
652 		err = -EINVAL;
653 		goto out_unlock;
654 	}
655 
656 	ice_eswitch_update_repr(vsi);
657 	ice_eswitch_replay_vf_mac_rule(vf);
658 
659 	/* if the VF has been reset allow it to come up again */
660 	if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
661 				ICE_MAX_SRIOV_VFS, vf->vf_id))
662 		dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
663 			vf->vf_id);
664 
665 out_unlock:
666 	if (flags & ICE_VF_RESET_LOCK)
667 		mutex_unlock(&vf->cfg_lock);
668 
669 	return err;
670 }
671 
672 /**
673  * ice_set_vf_state_qs_dis - Set VF queues state to disabled
674  * @vf: pointer to the VF structure
675  */
ice_set_vf_state_qs_dis(struct ice_vf * vf)676 void ice_set_vf_state_qs_dis(struct ice_vf *vf)
677 {
678 	/* Clear Rx/Tx enabled queues flag */
679 	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
680 	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
681 	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
682 }
683 
684 /* Private functions only accessed from other virtualization files */
685 
686 /**
687  * ice_dis_vf_qs - Disable the VF queues
688  * @vf: pointer to the VF structure
689  */
ice_dis_vf_qs(struct ice_vf * vf)690 void ice_dis_vf_qs(struct ice_vf *vf)
691 {
692 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
693 
694 	if (WARN_ON(!vsi))
695 		return;
696 
697 	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
698 	ice_vsi_stop_all_rx_rings(vsi);
699 	ice_set_vf_state_qs_dis(vf);
700 }
701 
702 /**
703  * ice_check_vf_init - helper to check if VF init complete
704  * @vf: the pointer to the VF to check
705  */
ice_check_vf_init(struct ice_vf * vf)706 int ice_check_vf_init(struct ice_vf *vf)
707 {
708 	struct ice_pf *pf = vf->pf;
709 
710 	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
711 		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
712 			vf->vf_id);
713 		return -EBUSY;
714 	}
715 	return 0;
716 }
717 
718 /**
719  * ice_vf_get_port_info - Get the VF's port info structure
720  * @vf: VF used to get the port info structure for
721  */
ice_vf_get_port_info(struct ice_vf * vf)722 struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
723 {
724 	return vf->pf->hw.port_info;
725 }
726 
727 /**
728  * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
729  * @vsi: the VSI to configure
730  * @enable: whether to enable or disable the spoof checking
731  *
732  * Configure a VSI to enable (or disable) spoof checking behavior.
733  */
ice_cfg_mac_antispoof(struct ice_vsi * vsi,bool enable)734 static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
735 {
736 	struct ice_vsi_ctx *ctx;
737 	int err;
738 
739 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
740 	if (!ctx)
741 		return -ENOMEM;
742 
743 	ctx->info.sec_flags = vsi->info.sec_flags;
744 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
745 
746 	if (enable)
747 		ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
748 	else
749 		ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
750 
751 	err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
752 	if (err)
753 		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
754 			enable ? "ON" : "OFF", vsi->vsi_num, err);
755 	else
756 		vsi->info.sec_flags = ctx->info.sec_flags;
757 
758 	kfree(ctx);
759 
760 	return err;
761 }
762 
763 /**
764  * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
765  * @vsi: VSI to enable Tx spoof checking for
766  */
ice_vsi_ena_spoofchk(struct ice_vsi * vsi)767 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
768 {
769 	struct ice_vsi_vlan_ops *vlan_ops;
770 	int err = 0;
771 
772 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
773 
774 	/* Allow VF with VLAN 0 only to send all tagged traffic */
775 	if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
776 		err = vlan_ops->ena_tx_filtering(vsi);
777 		if (err)
778 			return err;
779 	}
780 
781 	return ice_cfg_mac_antispoof(vsi, true);
782 }
783 
784 /**
785  * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
786  * @vsi: VSI to disable Tx spoof checking for
787  */
ice_vsi_dis_spoofchk(struct ice_vsi * vsi)788 static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
789 {
790 	struct ice_vsi_vlan_ops *vlan_ops;
791 	int err;
792 
793 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
794 
795 	err = vlan_ops->dis_tx_filtering(vsi);
796 	if (err)
797 		return err;
798 
799 	return ice_cfg_mac_antispoof(vsi, false);
800 }
801 
802 /**
803  * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
804  * @vsi: VSI associated to the VF
805  * @enable: whether to enable or disable the spoof checking
806  */
ice_vsi_apply_spoofchk(struct ice_vsi * vsi,bool enable)807 int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
808 {
809 	int err;
810 
811 	if (enable)
812 		err = ice_vsi_ena_spoofchk(vsi);
813 	else
814 		err = ice_vsi_dis_spoofchk(vsi);
815 
816 	return err;
817 }
818 
819 /**
820  * ice_is_vf_trusted
821  * @vf: pointer to the VF info
822  */
ice_is_vf_trusted(struct ice_vf * vf)823 bool ice_is_vf_trusted(struct ice_vf *vf)
824 {
825 	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
826 }
827 
828 /**
829  * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
830  * @vf: the VF to check
831  *
832  * Returns true if the VF has no Rx and no Tx queues enabled and returns false
833  * otherwise
834  */
ice_vf_has_no_qs_ena(struct ice_vf * vf)835 bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
836 {
837 	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
838 		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
839 }
840 
841 /**
842  * ice_is_vf_link_up - check if the VF's link is up
843  * @vf: VF to check if link is up
844  */
ice_is_vf_link_up(struct ice_vf * vf)845 bool ice_is_vf_link_up(struct ice_vf *vf)
846 {
847 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
848 
849 	if (ice_check_vf_init(vf))
850 		return false;
851 
852 	if (ice_vf_has_no_qs_ena(vf))
853 		return false;
854 	else if (vf->link_forced)
855 		return vf->link_up;
856 	else
857 		return pi->phy.link_info.link_info &
858 			ICE_AQ_LINK_UP;
859 }
860 
861 /**
862  * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
863  * @vf: VF to configure trust setting for
864  */
ice_vf_set_host_trust_cfg(struct ice_vf * vf)865 static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
866 {
867 	if (vf->trusted)
868 		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
869 	else
870 		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
871 }
872 
873 /**
874  * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
875  * @vf: VF to add MAC filters for
876  *
877  * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
878  * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
879  */
ice_vf_rebuild_host_mac_cfg(struct ice_vf * vf)880 static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
881 {
882 	struct device *dev = ice_pf_to_dev(vf->pf);
883 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
884 	u8 broadcast[ETH_ALEN];
885 	int status;
886 
887 	if (WARN_ON(!vsi))
888 		return -EINVAL;
889 
890 	if (ice_is_eswitch_mode_switchdev(vf->pf))
891 		return 0;
892 
893 	eth_broadcast_addr(broadcast);
894 	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
895 	if (status) {
896 		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
897 			vf->vf_id, status);
898 		return status;
899 	}
900 
901 	vf->num_mac++;
902 
903 	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
904 		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
905 					  ICE_FWD_TO_VSI);
906 		if (status) {
907 			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
908 				&vf->hw_lan_addr.addr[0], vf->vf_id,
909 				status);
910 			return status;
911 		}
912 		vf->num_mac++;
913 
914 		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
915 	}
916 
917 	return 0;
918 }
919 
920 /**
921  * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
922  * @vf: VF to add MAC filters for
923  * @vsi: Pointer to VSI
924  *
925  * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
926  * always re-adds either a VLAN 0 or port VLAN based filter after reset.
927  */
ice_vf_rebuild_host_vlan_cfg(struct ice_vf * vf,struct ice_vsi * vsi)928 static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
929 {
930 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
931 	struct device *dev = ice_pf_to_dev(vf->pf);
932 	int err;
933 
934 	if (ice_vf_is_port_vlan_ena(vf)) {
935 		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
936 		if (err) {
937 			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
938 				vf->vf_id, err);
939 			return err;
940 		}
941 
942 		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
943 	} else {
944 		err = ice_vsi_add_vlan_zero(vsi);
945 	}
946 
947 	if (err) {
948 		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
949 			ice_vf_is_port_vlan_ena(vf) ?
950 			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
951 		return err;
952 	}
953 
954 	err = vlan_ops->ena_rx_filtering(vsi);
955 	if (err)
956 		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
957 			 vf->vf_id, vsi->idx, err);
958 
959 	return 0;
960 }
961 
962 /**
963  * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
964  * @vf: VF to re-apply the configuration for
965  *
966  * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
967  * needs to re-apply the host configured Tx rate limiting configuration.
968  */
ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf * vf)969 static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
970 {
971 	struct device *dev = ice_pf_to_dev(vf->pf);
972 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
973 	int err;
974 
975 	if (WARN_ON(!vsi))
976 		return -EINVAL;
977 
978 	if (vf->min_tx_rate) {
979 		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
980 		if (err) {
981 			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
982 				vf->min_tx_rate, vf->vf_id, err);
983 			return err;
984 		}
985 	}
986 
987 	if (vf->max_tx_rate) {
988 		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
989 		if (err) {
990 			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
991 				vf->max_tx_rate, vf->vf_id, err);
992 			return err;
993 		}
994 	}
995 
996 	return 0;
997 }
998 
999 /**
1000  * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
1001  * @vsi: Pointer to VSI
1002  *
1003  * This function moves VSI into corresponding scheduler aggregator node
1004  * based on cached value of "aggregator node info" per VSI
1005  */
ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi * vsi)1006 static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
1007 {
1008 	struct ice_pf *pf = vsi->back;
1009 	struct device *dev;
1010 	int status;
1011 
1012 	if (!vsi->agg_node)
1013 		return;
1014 
1015 	dev = ice_pf_to_dev(pf);
1016 	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
1017 		dev_dbg(dev,
1018 			"agg_id %u already has reached max_num_vsis %u\n",
1019 			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
1020 		return;
1021 	}
1022 
1023 	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
1024 				     vsi->idx, vsi->tc_cfg.ena_tc);
1025 	if (status)
1026 		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
1027 			vsi->idx, vsi->agg_node->agg_id);
1028 	else
1029 		vsi->agg_node->num_vsis++;
1030 }
1031 
/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 *
 * Re-apply every host-administered setting (trust, default MAC, VLAN, Tx rate
 * limits, spoof checking, aggregator node placement) to the VF's main VSI
 * after the VSI has been rebuilt during a reset. Each step's failure is
 * logged but does not stop the remaining steps from being attempted.
 */
void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	/* the caller is expected to have rebuilt the VF's VSI already */
	if (WARN_ON(!vsi))
		return;

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}
1065 
/**
 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
 * @vf: VF that control VSI is being invalidated on
 *
 * Mark the VF as having no control VSI by storing the ICE_NO_VSI sentinel;
 * this does not free the VSI itself (see ice_vf_ctrl_vsi_release() for that).
 */
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}
1074 
1075 /**
1076  * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
1077  * @vf: VF that control VSI is being released on
1078  */
ice_vf_ctrl_vsi_release(struct ice_vf * vf)1079 void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
1080 {
1081 	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
1082 	ice_vf_ctrl_invalidate_vsi(vf);
1083 }
1084 
1085 /**
1086  * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
1087  * @vf: VF to setup control VSI for
1088  *
1089  * Returns pointer to the successfully allocated VSI struct on success,
1090  * otherwise returns NULL on failure.
1091  */
ice_vf_ctrl_vsi_setup(struct ice_vf * vf)1092 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
1093 {
1094 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
1095 	struct ice_pf *pf = vf->pf;
1096 	struct ice_vsi *vsi;
1097 
1098 	vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL);
1099 	if (!vsi) {
1100 		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
1101 		ice_vf_ctrl_invalidate_vsi(vf);
1102 	}
1103 
1104 	return vsi;
1105 }
1106 
/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
 * @vf: VF to remove access to VSI for
 *
 * Reset both the software index and the hardware VSI number to the
 * ICE_NO_VSI sentinel; the VSI itself is not freed here.
 */
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	vf->lan_vsi_idx = ICE_NO_VSI;
	vf->lan_vsi_num = ICE_NO_VSI;
}
1116 
/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
void ice_vf_set_initialized(struct ice_vf *vf)
{
	/* queues are disabled until the VF explicitly enables them */
	ice_set_vf_state_qs_dis(vf);
	/* drop any stale promiscuous/disabled state from before the reset */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
	/* VLAN v2 capabilities must be renegotiated after a reset */
	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}
1133