1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4 #include "iavf.h"
5 #include "iavf_prototype.h"
6 #include "iavf_client.h"
7 /* All iavf tracepoints are defined by the include below, which must
8 * be included exactly once across the whole kernel with
9 * CREATE_TRACE_POINTS defined
10 */
11 #define CREATE_TRACE_POINTS
12 #include "iavf_trace.h"
13
14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
16 static int iavf_close(struct net_device *netdev);
17 static void iavf_init_get_resources(struct iavf_adapter *adapter);
18 static int iavf_check_reset_complete(struct iavf_hw *hw);
19
20 char iavf_driver_name[] = "iavf";
21 static const char iavf_driver_string[] =
22 "Intel(R) Ethernet Adaptive Virtual Function Network Driver";
23
24 static const char iavf_copyright[] =
25 "Copyright (c) 2013 - 2018 Intel Corporation.";
26
27 /* iavf_pci_tbl - PCI Device ID Table
28 *
29 * Wildcard entries (PCI_ANY_ID) should come last
30 * Last entry must be all 0s
31 *
32 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
33 * Class, Class Mask, private data (not used) }
34 */
35 static const struct pci_device_id iavf_pci_tbl[] = {
36 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
37 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
38 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
39 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
40 /* required last entry */
41 {0, }
42 };
43
44 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
45
46 MODULE_ALIAS("i40evf");
47 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
48 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
49 MODULE_LICENSE("GPL v2");
50
51 static const struct net_device_ops iavf_netdev_ops;
52
53 int iavf_status_to_errno(enum iavf_status status)
54 {
55 switch (status) {
56 case IAVF_SUCCESS:
57 return 0;
58 case IAVF_ERR_PARAM:
59 case IAVF_ERR_MAC_TYPE:
60 case IAVF_ERR_INVALID_MAC_ADDR:
61 case IAVF_ERR_INVALID_LINK_SETTINGS:
62 case IAVF_ERR_INVALID_PD_ID:
63 case IAVF_ERR_INVALID_QP_ID:
64 case IAVF_ERR_INVALID_CQ_ID:
65 case IAVF_ERR_INVALID_CEQ_ID:
66 case IAVF_ERR_INVALID_AEQ_ID:
67 case IAVF_ERR_INVALID_SIZE:
68 case IAVF_ERR_INVALID_ARP_INDEX:
69 case IAVF_ERR_INVALID_FPM_FUNC_ID:
70 case IAVF_ERR_QP_INVALID_MSG_SIZE:
71 case IAVF_ERR_INVALID_FRAG_COUNT:
72 case IAVF_ERR_INVALID_ALIGNMENT:
73 case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
74 case IAVF_ERR_INVALID_IMM_DATA_SIZE:
75 case IAVF_ERR_INVALID_VF_ID:
76 case IAVF_ERR_INVALID_HMCFN_ID:
77 case IAVF_ERR_INVALID_PBLE_INDEX:
78 case IAVF_ERR_INVALID_SD_INDEX:
79 case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
80 case IAVF_ERR_INVALID_SD_TYPE:
81 case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
82 case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
83 case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
84 return -EINVAL;
85 case IAVF_ERR_NVM:
86 case IAVF_ERR_NVM_CHECKSUM:
87 case IAVF_ERR_PHY:
88 case IAVF_ERR_CONFIG:
89 case IAVF_ERR_UNKNOWN_PHY:
90 case IAVF_ERR_LINK_SETUP:
91 case IAVF_ERR_ADAPTER_STOPPED:
92 case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
93 case IAVF_ERR_AUTONEG_NOT_COMPLETE:
94 case IAVF_ERR_RESET_FAILED:
95 case IAVF_ERR_BAD_PTR:
96 case IAVF_ERR_SWFW_SYNC:
97 case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
98 case IAVF_ERR_QUEUE_EMPTY:
99 case IAVF_ERR_FLUSHED_QUEUE:
100 case IAVF_ERR_OPCODE_MISMATCH:
101 case IAVF_ERR_CQP_COMPL_ERROR:
102 case IAVF_ERR_BACKING_PAGE_ERROR:
103 case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
104 case IAVF_ERR_MEMCPY_FAILED:
105 case IAVF_ERR_SRQ_ENABLED:
106 case IAVF_ERR_ADMIN_QUEUE_ERROR:
107 case IAVF_ERR_ADMIN_QUEUE_FULL:
108 case IAVF_ERR_BAD_IWARP_CQE:
109 case IAVF_ERR_NVM_BLANK_MODE:
110 case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
111 case IAVF_ERR_DIAG_TEST_FAILED:
112 case IAVF_ERR_FIRMWARE_API_VERSION:
113 case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
114 return -EIO;
115 case IAVF_ERR_DEVICE_NOT_SUPPORTED:
116 return -ENODEV;
117 case IAVF_ERR_NO_AVAILABLE_VSI:
118 case IAVF_ERR_RING_FULL:
119 return -ENOSPC;
120 case IAVF_ERR_NO_MEMORY:
121 return -ENOMEM;
122 case IAVF_ERR_TIMEOUT:
123 case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
124 return -ETIMEDOUT;
125 case IAVF_ERR_NOT_IMPLEMENTED:
126 case IAVF_NOT_SUPPORTED:
127 return -EOPNOTSUPP;
128 case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
129 return -EALREADY;
130 case IAVF_ERR_NOT_READY:
131 return -EBUSY;
132 case IAVF_ERR_BUF_TOO_SHORT:
133 return -EMSGSIZE;
134 }
135
136 return -EIO;
137 }
138
139 int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
140 {
141 switch (v_status) {
142 case VIRTCHNL_STATUS_SUCCESS:
143 return 0;
144 case VIRTCHNL_STATUS_ERR_PARAM:
145 case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
146 return -EINVAL;
147 case VIRTCHNL_STATUS_ERR_NO_MEMORY:
148 return -ENOMEM;
149 case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
150 case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
151 case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
152 return -EIO;
153 case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
154 return -EOPNOTSUPP;
155 }
156
157 return -EIO;
158 }
159
160 /**
161 * iavf_pdev_to_adapter - go from pci_dev to adapter
162 * @pdev: pci_dev pointer
163 */
164 static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
165 {
166 return netdev_priv(pci_get_drvdata(pdev));
167 }
168
169 /**
170 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
171 * @hw: pointer to the HW structure
172 * @mem: ptr to mem struct to fill out
173 * @size: size of memory requested
174 * @alignment: what to align the allocation to
175 **/
176 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
177 struct iavf_dma_mem *mem,
178 u64 size, u32 alignment)
179 {
180 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
181
182 if (!mem)
183 return IAVF_ERR_PARAM;
184
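/* Round the requested size up to the alignment, then get a coherent DMA buffer */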
185 mem->size = ALIGN(size, alignment);
186 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
187 (dma_addr_t *)&mem->pa, GFP_KERNEL);
188 if (mem->va)
189 return 0;
190 else
191 return IAVF_ERR_NO_MEMORY;
192 }
193
194 /**
195 * iavf_free_dma_mem_d - OS specific memory free for shared code
196 * @hw: pointer to the HW structure
197 * @mem: ptr to mem struct to free
198 **/
199 enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
200 struct iavf_dma_mem *mem)
201 {
202 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
203
204 if (!mem || !mem->va)
205 return IAVF_ERR_PARAM;
206 dma_free_coherent(&adapter->pdev->dev, mem->size,
207 mem->va, (dma_addr_t)mem->pa);
208 return 0;
209 }
210
211 /**
212 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
213 * @hw: pointer to the HW structure
214 * @mem: ptr to mem struct to fill out
215 * @size: size of memory requested
216 **/
217 enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
218 struct iavf_virt_mem *mem, u32 size)
219 {
220 if (!mem)
221 return IAVF_ERR_PARAM;
222
223 mem->size = size;
224 mem->va = kzalloc(size, GFP_KERNEL);
225
226 if (mem->va)
227 return 0;
228 else
229 return IAVF_ERR_NO_MEMORY;
230 }
231
232 /**
233 * iavf_free_virt_mem_d - OS specific memory free for shared code
234 * @hw: pointer to the HW structure
235 * @mem: ptr to mem struct to free
236 **/
237 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
238 struct iavf_virt_mem *mem)
239 {
240 if (!mem)
241 return IAVF_ERR_PARAM;
242
243 /* it's ok to kfree a NULL pointer */
244 kfree(mem->va);
245
246 return 0;
247 }
248
249 /**
250 * iavf_lock_timeout - try to lock mutex but give up after timeout
251 * @lock: mutex that should be locked
252 * @msecs: timeout in msecs
253 *
254 * Returns 0 on success, negative on failure
255 **/
256 int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
257 {
258 unsigned int wait, delay = 10;
259
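/* Poll the mutex every 10 ms until it is acquired or the timeout expires */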
260 for (wait = 0; wait < msecs; wait += delay) {
261 if (mutex_trylock(lock))
262 return 0;
263
264 msleep(delay);
265 }
266
267 return -1;
268 }
269
270 /**
271 * iavf_schedule_reset - Set the flags and schedule a reset event
272 * @adapter: board private structure
273 **/
274 void iavf_schedule_reset(struct iavf_adapter *adapter)
275 {
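/* Only schedule the reset task if a reset is not already pending or requested */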
276 if (!(adapter->flags &
277 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
278 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
279 queue_work(adapter->wq, &adapter->reset_task);
280 }
281 }
282
283 /**
284 * iavf_schedule_request_stats - Set the flags and schedule statistics request
285 * @adapter: board private structure
286 *
287 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
288 * request and refresh ethtool stats
289 **/
290 void iavf_schedule_request_stats(struct iavf_adapter *adapter)
291 {
292 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
293 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
294 }
295
296 /**
297 * iavf_tx_timeout - Respond to a Tx Hang
298 * @netdev: network interface device structure
299 * @txqueue: queue number that is timing out
300 **/
301 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
302 {
303 struct iavf_adapter *adapter = netdev_priv(netdev);
304
305 adapter->tx_timeout_count++;
306 iavf_schedule_reset(adapter);
307 }
308
309 /**
310 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
311 * @adapter: board private structure
312 **/
313 static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
314 {
315 struct iavf_hw *hw = &adapter->hw;
316
317 if (!adapter->msix_entries)
318 return;
319
320 wr32(hw, IAVF_VFINT_DYN_CTL01, 0);
321
322 iavf_flush(hw);
323
324 synchronize_irq(adapter->msix_entries[0].vector);
325 }
326
327 /**
328 * iavf_misc_irq_enable - Enable default interrupt generation settings
329 * @adapter: board private structure
330 **/
331 static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
332 {
333 struct iavf_hw *hw = &adapter->hw;
334
335 wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
336 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
337 wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
338
339 iavf_flush(hw);
340 }
341
342 /**
343 * iavf_irq_disable - Mask off interrupt generation on the NIC
344 * @adapter: board private structure
345 **/
346 static void iavf_irq_disable(struct iavf_adapter *adapter)
347 {
348 int i;
349 struct iavf_hw *hw = &adapter->hw;
350
351 if (!adapter->msix_entries)
352 return;
353
354 for (i = 1; i < adapter->num_msix_vectors; i++) {
355 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
356 synchronize_irq(adapter->msix_entries[i].vector);
357 }
358 iavf_flush(hw);
359 }
360
361 /**
362 * iavf_irq_enable_queues - Enable interrupt for specified queues
363 * @adapter: board private structure
364 * @mask: bitmap of queues to enable
365 **/
366 void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
367 {
368 struct iavf_hw *hw = &adapter->hw;
369 int i;
370
371 for (i = 1; i < adapter->num_msix_vectors; i++) {
372 if (mask & BIT(i - 1)) {
373 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
374 IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
375 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
376 }
377 }
378 }
379
380 /**
381 * iavf_irq_enable - Enable default interrupt generation settings
382 * @adapter: board private structure
383 * @flush: boolean value whether to run rd32()
384 **/
385 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
386 {
387 struct iavf_hw *hw = &adapter->hw;
388
389 iavf_misc_irq_enable(adapter);
390 iavf_irq_enable_queues(adapter, ~0);
391
392 if (flush)
393 iavf_flush(hw);
394 }
395
396 /**
397 * iavf_msix_aq - Interrupt handler for vector 0
398 * @irq: interrupt number
399 * @data: pointer to netdev
400 **/
401 static irqreturn_t iavf_msix_aq(int irq, void *data)
402 {
403 struct net_device *netdev = data;
404 struct iavf_adapter *adapter = netdev_priv(netdev);
405 struct iavf_hw *hw = &adapter->hw;
406
407 /* handle non-queue interrupts, these reads clear the registers */
408 rd32(hw, IAVF_VFINT_ICR01);
409 rd32(hw, IAVF_VFINT_ICR0_ENA1);
410
411 if (adapter->state != __IAVF_REMOVE)
412 /* schedule work on the private workqueue */
413 queue_work(adapter->wq, &adapter->adminq_task);
414
415 return IRQ_HANDLED;
416 }
417
418 /**
419 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
420 * @irq: interrupt number
421 * @data: pointer to a q_vector
422 **/
423 static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
424 {
425 struct iavf_q_vector *q_vector = data;
426
427 if (!q_vector->tx.ring && !q_vector->rx.ring)
428 return IRQ_HANDLED;
429
430 napi_schedule_irqoff(&q_vector->napi);
431
432 return IRQ_HANDLED;
433 }
434
435 /**
436 * iavf_map_vector_to_rxq - associate irqs with rx queues
437 * @adapter: board private structure
438 * @v_idx: interrupt number
439 * @r_idx: queue number
440 **/
441 static void
442 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
443 {
444 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
445 struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
446 struct iavf_hw *hw = &adapter->hw;
447
448 rx_ring->q_vector = q_vector;
449 rx_ring->next = q_vector->rx.ring;
450 rx_ring->vsi = &adapter->vsi;
451 q_vector->rx.ring = rx_ring;
452 q_vector->rx.count++;
453 q_vector->rx.next_update = jiffies + 1;
454 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
455 q_vector->ring_mask |= BIT(r_idx);
456 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
457 q_vector->rx.current_itr >> 1);
458 q_vector->rx.current_itr = q_vector->rx.target_itr;
459 }
460
461 /**
462 * iavf_map_vector_to_txq - associate irqs with tx queues
463 * @adapter: board private structure
464 * @v_idx: interrupt number
465 * @t_idx: queue number
466 **/
467 static void
468 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
469 {
470 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
471 struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
472 struct iavf_hw *hw = &adapter->hw;
473
474 tx_ring->q_vector = q_vector;
475 tx_ring->next = q_vector->tx.ring;
476 tx_ring->vsi = &adapter->vsi;
477 q_vector->tx.ring = tx_ring;
478 q_vector->tx.count++;
479 q_vector->tx.next_update = jiffies + 1;
480 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
481 q_vector->num_ringpairs++;
482 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
483 q_vector->tx.target_itr >> 1);
484 q_vector->tx.current_itr = q_vector->tx.target_itr;
485 }
486
487 /**
488 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
489 * @adapter: board private structure to initialize
490 *
491 * This function maps descriptor rings to the queue-specific vectors
492 * we were allotted through the MSI-X enabling code. Ideally, we'd have
493 * one vector per ring/queue, but on a constrained vector budget, we
494 * group the rings as "efficiently" as possible. You would add new
495 * mapping configurations in here.
496 **/
497 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
498 {
499 int rings_remaining = adapter->num_active_queues;
500 int ridx = 0, vidx = 0;
501 int q_vectors;
502
503 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
504
505 for (; ridx < rings_remaining; ridx++) {
506 iavf_map_vector_to_rxq(adapter, vidx, ridx);
507 iavf_map_vector_to_txq(adapter, vidx, ridx);
508
509 /* In the case where we have more queues than vectors, continue
510 * round-robin on vectors until all queues are mapped.
511 */
512 if (++vidx >= q_vectors)
513 vidx = 0;
514 }
515
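/* Ask the PF to program the new queue-to-vector mapping via the admin queue */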
516 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
517 }
518
519 /**
520 * iavf_irq_affinity_notify - Callback for affinity changes
521 * @notify: context as to what irq was changed
522 * @mask: the new affinity mask
523 *
524 * This is a callback function used by the irq_set_affinity_notifier function
525 * so that we may register to receive changes to the irq affinity masks.
526 **/
527 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
528 const cpumask_t *mask)
529 {
530 struct iavf_q_vector *q_vector =
531 container_of(notify, struct iavf_q_vector, affinity_notify);
532
533 cpumask_copy(&q_vector->affinity_mask, mask);
534 }
535
536 /**
537 * iavf_irq_affinity_release - Callback for affinity notifier release
538 * @ref: internal core kernel usage
539 *
540 * This is a callback function used by the irq_set_affinity_notifier function
541 * to inform the current notification subscriber that they will no longer
542 * receive notifications.
543 **/
544 static void iavf_irq_affinity_release(struct kref *ref) {}
545
546 /**
547 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
548 * @adapter: board private structure
549 * @basename: device basename
550 *
551 * Allocates MSI-X vectors for tx and rx handling, and requests
552 * interrupts from the kernel.
553 **/
554 static int
555 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
556 {
557 unsigned int vector, q_vectors;
558 unsigned int rx_int_idx = 0, tx_int_idx = 0;
559 int irq_num, err;
560 int cpu;
561
562 iavf_irq_disable(adapter);
563 /* Decrement for Other and TCP Timer vectors */
564 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
565
566 for (vector = 0; vector < q_vectors; vector++) {
567 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];
568
569 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
570
571 if (q_vector->tx.ring && q_vector->rx.ring) {
572 snprintf(q_vector->name, sizeof(q_vector->name),
573 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
574 tx_int_idx++;
575 } else if (q_vector->rx.ring) {
576 snprintf(q_vector->name, sizeof(q_vector->name),
577 "iavf-%s-rx-%u", basename, rx_int_idx++);
578 } else if (q_vector->tx.ring) {
579 snprintf(q_vector->name, sizeof(q_vector->name),
580 "iavf-%s-tx-%u", basename, tx_int_idx++);
581 } else {
582 /* skip this unused q_vector */
583 continue;
584 }
585 err = request_irq(irq_num,
586 iavf_msix_clean_rings,
587 0,
588 q_vector->name,
589 q_vector);
590 if (err) {
591 dev_info(&adapter->pdev->dev,
592 "Request_irq failed, error: %d\n", err);
593 goto free_queue_irqs;
594 }
595 /* register for affinity change notifications */
596 q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
597 q_vector->affinity_notify.release =
598 iavf_irq_affinity_release;
599 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
600 /* Spread the IRQ affinity hints across online CPUs. Note that
601 * get_cpu_mask returns a mask with a permanent lifetime so
602 * it's safe to use as a hint for irq_update_affinity_hint.
603 */
604 cpu = cpumask_local_spread(q_vector->v_idx, -1);
605 irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
606 }
607
608 return 0;
609
610 free_queue_irqs:
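/* Unwind: remove affinity notifiers/hints and free any IRQs already requested */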
611 while (vector) {
612 vector--;
613 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
614 irq_set_affinity_notifier(irq_num, NULL);
615 irq_update_affinity_hint(irq_num, NULL);
616 free_irq(irq_num, &adapter->q_vectors[vector]);
617 }
618 return err;
619 }
620
621 /**
622 * iavf_request_misc_irq - Initialize MSI-X interrupts
623 * @adapter: board private structure
624 *
625 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
626 * vector is only for the admin queue, and stays active even when the netdev
627 * is closed.
628 **/
629 static int iavf_request_misc_irq(struct iavf_adapter *adapter)
630 {
631 struct net_device *netdev = adapter->netdev;
632 int err;
633
634 snprintf(adapter->misc_vector_name,
635 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
636 dev_name(&adapter->pdev->dev));
637 err = request_irq(adapter->msix_entries[0].vector,
638 &iavf_msix_aq, 0,
639 adapter->misc_vector_name, netdev);
640 if (err) {
641 dev_err(&adapter->pdev->dev,
642 "request_irq for %s failed: %d\n",
643 adapter->misc_vector_name, err);
644 free_irq(adapter->msix_entries[0].vector, netdev);
645 }
646 return err;
647 }
648
649 /**
650 * iavf_free_traffic_irqs - Free MSI-X interrupts
651 * @adapter: board private structure
652 *
653 * Frees all MSI-X vectors other than 0.
654 **/
655 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
656 {
657 int vector, irq_num, q_vectors;
658
659 if (!adapter->msix_entries)
660 return;
661
662 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
663
664 for (vector = 0; vector < q_vectors; vector++) {
665 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
666 irq_set_affinity_notifier(irq_num, NULL);
667 irq_update_affinity_hint(irq_num, NULL);
668 free_irq(irq_num, &adapter->q_vectors[vector]);
669 }
670 }
671
672 /**
673 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
674 * @adapter: board private structure
675 *
676 * Frees MSI-X vector 0.
677 **/
678 static void iavf_free_misc_irq(struct iavf_adapter *adapter)
679 {
680 struct net_device *netdev = adapter->netdev;
681
682 if (!adapter->msix_entries)
683 return;
684
685 free_irq(adapter->msix_entries[0].vector, netdev);
686 }
687
688 /**
689 * iavf_configure_tx - Configure Transmit Unit after Reset
690 * @adapter: board private structure
691 *
692 * Configure the Tx unit of the MAC after a reset.
693 **/
694 static void iavf_configure_tx(struct iavf_adapter *adapter)
695 {
696 struct iavf_hw *hw = &adapter->hw;
697 int i;
698
699 for (i = 0; i < adapter->num_active_queues; i++)
700 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
701 }
702
703 /**
704 * iavf_configure_rx - Configure Receive Unit after Reset
705 * @adapter: board private structure
706 *
707 * Configure the Rx unit of the MAC after a reset.
708 **/
709 static void iavf_configure_rx(struct iavf_adapter *adapter)
710 {
711 unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
712 struct iavf_hw *hw = &adapter->hw;
713 int i;
714
715 /* Legacy Rx will always default to a 2048 buffer size. */
716 #if (PAGE_SIZE < 8192)
717 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
718 struct net_device *netdev = adapter->netdev;
719
720 /* For jumbo frames on systems with 4K pages we have to use
721 * an order 1 page, so we might as well increase the size
722 * of our Rx buffer to make better use of the available space
723 */
724 rx_buf_len = IAVF_RXBUFFER_3072;
725
726 /* We use a 1536 buffer size for configurations with
727 * standard Ethernet mtu. On x86 this gives us enough room
728 * for shared info and 192 bytes of padding.
729 */
730 if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
731 (netdev->mtu <= ETH_DATA_LEN))
732 rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
733 }
734 #endif
735
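/* Program each Rx ring's tail register address and the chosen buffer length */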
736 for (i = 0; i < adapter->num_active_queues; i++) {
737 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
738 adapter->rx_rings[i].rx_buf_len = rx_buf_len;
739
740 if (adapter->flags & IAVF_FLAG_LEGACY_RX)
741 clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
742 else
743 set_ring_build_skb_enabled(&adapter->rx_rings[i]);
744 }
745 }
746
747 /**
748 * iavf_find_vlan - Search filter list for specific vlan filter
749 * @adapter: board private structure
750 * @vlan: vlan tag
751 *
752 * Returns ptr to the filter object or NULL. Must be called while holding the
753 * mac_vlan_list_lock.
754 **/
755 static struct
756 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
757 struct iavf_vlan vlan)
758 {
759 struct iavf_vlan_filter *f;
760
761 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
762 if (f->vlan.vid == vlan.vid &&
763 f->vlan.tpid == vlan.tpid)
764 return f;
765 }
766
767 return NULL;
768 }
769
770 /**
771 * iavf_add_vlan - Add a vlan filter to the list
772 * @adapter: board private structure
773 * @vlan: VLAN tag
774 *
775 * Returns ptr to the filter object or NULL when no memory available.
776 **/
777 static struct
778 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
779 struct iavf_vlan vlan)
780 {
781 struct iavf_vlan_filter *f = NULL;
782
783 spin_lock_bh(&adapter->mac_vlan_list_lock);
784
785 f = iavf_find_vlan(adapter, vlan);
786 if (!f) {
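/* Atomic allocation: the mac_vlan_list_lock spinlock is held here */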
787 f = kzalloc(sizeof(*f), GFP_ATOMIC);
788 if (!f)
789 goto clearout;
790
791 f->vlan = vlan;
792
793 list_add_tail(&f->list, &adapter->vlan_filter_list);
794 f->add = true;
795 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
796 }
797
798 clearout:
799 spin_unlock_bh(&adapter->mac_vlan_list_lock);
800 return f;
801 }
802
803 /**
804 * iavf_del_vlan - Remove a vlan filter from the list
805 * @adapter: board private structure
806 * @vlan: VLAN tag
807 **/
808 static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
809 {
810 struct iavf_vlan_filter *f;
811
812 spin_lock_bh(&adapter->mac_vlan_list_lock);
813
814 f = iavf_find_vlan(adapter, vlan);
815 if (f) {
816 f->remove = true;
817 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
818 }
819
820 spin_unlock_bh(&adapter->mac_vlan_list_lock);
821 }
822
823 /**
824 * iavf_restore_filters
825 * @adapter: board private structure
826 *
827 * Restore existing non-MAC filters when the VF netdev comes back up
828 **/
829 static void iavf_restore_filters(struct iavf_adapter *adapter)
830 {
831 u16 vid;
832
833 /* re-add all VLAN filters */
834 for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
835 iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
836
837 for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
838 iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
839 }
840
841 /**
842 * iavf_get_num_vlans_added - get number of VLANs added
843 * @adapter: board private structure
844 */
845 u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
846 {
847 return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
848 bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
849 }
850
851 /**
852 * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
853 * @adapter: board private structure
854 *
855 * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
856 * do not impose a limit, which preserves the existing behavior; for
857 * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum number of filters reported by the PF.
858 **/
859 static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
860 {
861 /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
862 * never been a limit on the VF driver side
863 */
864 if (VLAN_ALLOWED(adapter))
865 return VLAN_N_VID;
866 else if (VLAN_V2_ALLOWED(adapter))
867 return adapter->vlan_v2_caps.filtering.max_filters;
868
869 return 0;
870 }
871
872 /**
873 * iavf_max_vlans_added - check if maximum VLANs allowed already exist
874 * @adapter: board private structure
875 **/
876 static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
877 {
878 if (iavf_get_num_vlans_added(adapter) <
879 iavf_get_max_vlans_allowed(adapter))
880 return false;
881
882 return true;
883 }
884
885 /**
886 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
887 * @netdev: network device struct
888 * @proto: unused protocol data
889 * @vid: VLAN tag
890 **/
891 static int iavf_vlan_rx_add_vid(struct net_device *netdev,
892 __always_unused __be16 proto, u16 vid)
893 {
894 struct iavf_adapter *adapter = netdev_priv(netdev);
895
896 if (!VLAN_FILTERING_ALLOWED(adapter))
897 return -EIO;
898
899 if (iavf_max_vlans_added(adapter)) {
900 netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
901 iavf_get_max_vlans_allowed(adapter));
902 return -EIO;
903 }
904
905 if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
906 return -ENOMEM;
907
908 return 0;
909 }
910
911 /**
912 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
913 * @netdev: network device struct
914 * @proto: unused protocol data
915 * @vid: VLAN tag
916 **/
917 static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
918 __always_unused __be16 proto, u16 vid)
919 {
920 struct iavf_adapter *adapter = netdev_priv(netdev);
921
922 iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
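/* Clear the VLAN ID from the tracked bitmap so iavf_restore_filters() does not re-add it */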
923 if (proto == cpu_to_be16(ETH_P_8021Q))
924 clear_bit(vid, adapter->vsi.active_cvlans);
925 else
926 clear_bit(vid, adapter->vsi.active_svlans);
927
928 return 0;
929 }
930
931 /**
932 * iavf_find_filter - Search filter list for specific mac filter
933 * @adapter: board private structure
934 * @macaddr: the MAC address
935 *
936 * Returns ptr to the filter object or NULL. Must be called while holding the
937 * mac_vlan_list_lock.
938 **/
939 static struct
940 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
941 const u8 *macaddr)
942 {
943 struct iavf_mac_filter *f;
944
945 if (!macaddr)
946 return NULL;
947
948 list_for_each_entry(f, &adapter->mac_filter_list, list) {
949 if (ether_addr_equal(macaddr, f->macaddr))
950 return f;
951 }
952 return NULL;
953 }
954
955 /**
956 * iavf_add_filter - Add a mac filter to the filter list
957 * @adapter: board private structure
958 * @macaddr: the MAC address
959 *
960 * Returns ptr to the filter object or NULL when no memory available.
961 **/
962 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
963 const u8 *macaddr)
964 {
965 struct iavf_mac_filter *f;
966
967 if (!macaddr)
968 return NULL;
969
970 f = iavf_find_filter(adapter, macaddr);
971 if (!f) {
972 f = kzalloc(sizeof(*f), GFP_ATOMIC);
973 if (!f)
974 return f;
975
976 ether_addr_copy(f->macaddr, macaddr);
977
978 list_add_tail(&f->list, &adapter->mac_filter_list);
979 f->add = true;
980 f->add_handled = false;
981 f->is_new_mac = true;
982 f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
983 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
984 } else {
985 f->remove = false;
986 }
987
988 return f;
989 }
990
991 /**
992 * iavf_replace_primary_mac - Replace current primary address
993 * @adapter: board private structure
994 * @new_mac: new MAC address to be applied
995 *
996 * Replace current dev_addr and send request to PF for removal of previous
997 * primary MAC address filter and addition of new primary MAC filter.
998 * Return 0 for success, -ENOMEM for failure.
999 *
1000 * Do not call this with mac_vlan_list_lock!
1001 **/
1002 int iavf_replace_primary_mac(struct iavf_adapter *adapter,
1003 const u8 *new_mac)
1004 {
1005 struct iavf_hw *hw = &adapter->hw;
1006 struct iavf_mac_filter *f;
1007
1008 spin_lock_bh(&adapter->mac_vlan_list_lock);
1009
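/* Demote all existing filters; only the new address will be marked primary */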
1010 list_for_each_entry(f, &adapter->mac_filter_list, list) {
1011 f->is_primary = false;
1012 }
1013
1014 f = iavf_find_filter(adapter, hw->mac.addr);
1015 if (f) {
1016 f->remove = true;
1017 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1018 }
1019
1020 f = iavf_add_filter(adapter, new_mac);
1021
1022 if (f) {
1023 /* Always send the request to add if changing primary MAC
1024 * even if filter is already present on the list
1025 */
1026 f->is_primary = true;
1027 f->add = true;
1028 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
1029 ether_addr_copy(hw->mac.addr, new_mac);
1030 }
1031
1032 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1033
1034 /* schedule the watchdog task to immediately process the request */
1035 if (f) {
1036 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1037 return 0;
1038 }
1039 return -ENOMEM;
1040 }
1041
1042 /**
1043 * iavf_is_mac_set_handled - check whether the PF has handled the set MAC request
1044 * @netdev: network interface device structure
1045 * @macaddr: MAC address to set
1046 *
1047 * Returns true on success, false on failure
1048 */
1049 static bool iavf_is_mac_set_handled(struct net_device *netdev,
1050 const u8 *macaddr)
1051 {
1052 struct iavf_adapter *adapter = netdev_priv(netdev);
1053 struct iavf_mac_filter *f;
1054 bool ret = false;
1055
1056 spin_lock_bh(&adapter->mac_vlan_list_lock);
1057
1058 f = iavf_find_filter(adapter, macaddr);
1059
1060 if (!f || (!f->add && f->add_handled))
1061 ret = true;
1062
1063 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1064
1065 return ret;
1066 }
1067
1068 /**
1069 * iavf_set_mac - NDO callback to set port MAC address
1070 * @netdev: network interface device structure
1071 * @p: pointer to an address structure
1072 *
1073 * Returns 0 on success, negative on failure
1074 */
1075 static int iavf_set_mac(struct net_device *netdev, void *p)
1076 {
1077 struct iavf_adapter *adapter = netdev_priv(netdev);
1078 struct sockaddr *addr = p;
1079 int ret;
1080
1081 if (!is_valid_ether_addr(addr->sa_data))
1082 return -EADDRNOTAVAIL;
1083
1084 ret = iavf_replace_primary_mac(adapter, addr->sa_data);
1085
1086 if (ret)
1087 return ret;
1088
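/* Wait up to 2.5 seconds for the PF to respond to the set MAC request */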
1089 ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
1090 iavf_is_mac_set_handled(netdev, addr->sa_data),
1091 msecs_to_jiffies(2500));
1092
1093 /* If ret < 0, the wait was interrupted.
1094 * If ret == 0, the wait timed out.
1095 * Otherwise the PF responded to the set MAC request: check whether the
1096 * netdev MAC address was updated to the requested one. If it was, the
1097 * operation succeeded; if not, return -EACCES.
1098 */
1099 if (ret < 0)
1100 return ret;
1101
1102 if (!ret)
1103 return -EAGAIN;
1104
1105 if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
1106 return -EACCES;
1107
1108 return 0;
1109 }
1110
1111 /**
1112 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
1113 * @netdev: the netdevice
1114 * @addr: address to add
1115 *
1116 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1117 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1118 */
1119 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
1120 {
1121 struct iavf_adapter *adapter = netdev_priv(netdev);
1122
1123 if (iavf_add_filter(adapter, addr))
1124 return 0;
1125 else
1126 return -ENOMEM;
1127 }
1128
1129 /**
1130 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1131 * @netdev: the netdevice
1132 * @addr: address to remove
1133 *
1134 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1135 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1136 */
1137 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
1138 {
1139 struct iavf_adapter *adapter = netdev_priv(netdev);
1140 struct iavf_mac_filter *f;
1141
1142 /* Under some circumstances, we might receive a request to delete
1143 * our own device address from our uc list. Because we store the
1144 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1145 * such requests and not delete our device address from this list.
1146 */
1147 if (ether_addr_equal(addr, netdev->dev_addr))
1148 return 0;
1149
1150 f = iavf_find_filter(adapter, addr);
1151 if (f) {
1152 f->remove = true;
1153 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1154 }
1155 return 0;
1156 }
1157
1158 /**
1159 * iavf_set_rx_mode - NDO callback to set the netdev filters
1160 * @netdev: network interface device structure
1161 **/
1162 static void iavf_set_rx_mode(struct net_device *netdev)
1163 {
1164 struct iavf_adapter *adapter = netdev_priv(netdev);
1165
1166 spin_lock_bh(&adapter->mac_vlan_list_lock);
1167 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1168 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1169 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1170
1171 if (netdev->flags & IFF_PROMISC &&
1172 !(adapter->flags & IAVF_FLAG_PROMISC_ON))
1173 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
1174 else if (!(netdev->flags & IFF_PROMISC) &&
1175 adapter->flags & IAVF_FLAG_PROMISC_ON)
1176 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
1177
1178 if (netdev->flags & IFF_ALLMULTI &&
1179 !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
1180 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
1181 else if (!(netdev->flags & IFF_ALLMULTI) &&
1182 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
1183 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
1184 }
1185
1186 /**
1187 * iavf_napi_enable_all - enable NAPI on all queue vectors
1188 * @adapter: board private structure
1189 **/
1190 static void iavf_napi_enable_all(struct iavf_adapter *adapter)
1191 {
1192 int q_idx;
1193 struct iavf_q_vector *q_vector;
1194 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1195
1196 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1197 struct napi_struct *napi;
1198
1199 q_vector = &adapter->q_vectors[q_idx];
1200 napi = &q_vector->napi;
1201 napi_enable(napi);
1202 }
1203 }
1204
1205 /**
1206 * iavf_napi_disable_all - disable NAPI on all queue vectors
1207 * @adapter: board private structure
1208 **/
1209 static void iavf_napi_disable_all(struct iavf_adapter *adapter)
1210 {
1211 int q_idx;
1212 struct iavf_q_vector *q_vector;
1213 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1214
1215 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1216 q_vector = &adapter->q_vectors[q_idx];
1217 napi_disable(&q_vector->napi);
1218 }
1219 }
1220
1221 /**
1222 * iavf_configure - set up transmit and receive data structures
1223 * @adapter: board private structure
1224 **/
1225 static void iavf_configure(struct iavf_adapter *adapter)
1226 {
1227 struct net_device *netdev = adapter->netdev;
1228 int i;
1229
1230 iavf_set_rx_mode(netdev);
1231
1232 iavf_configure_tx(adapter);
1233 iavf_configure_rx(adapter);
1234 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
1235
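/* Post the initial receive buffers to every active Rx ring */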
1236 for (i = 0; i < adapter->num_active_queues; i++) {
1237 struct iavf_ring *ring = &adapter->rx_rings[i];
1238
1239 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
1240 }
1241 }
1242
1243 /**
1244 * iavf_up_complete - Finish the last steps of bringing up a connection
1245 * @adapter: board private structure
1246 *
1247 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
1248 **/
1249 static void iavf_up_complete(struct iavf_adapter *adapter)
1250 {
1251 iavf_change_state(adapter, __IAVF_RUNNING);
1252 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1253
1254 iavf_napi_enable_all(adapter);
1255
1256 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
1257 if (CLIENT_ENABLED(adapter))
1258 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
1259 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1260 }
1261
1262 /**
1263 * iavf_clear_mac_vlan_filters - Remove MAC and VLAN filters not yet sent to
1264 * the PF and mark the remaining filters to be removed.
1265 * @adapter: board private structure
1266 **/
1267 static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
1268 {
1269 struct iavf_vlan_filter *vlf, *vlftmp;
1270 struct iavf_mac_filter *f, *ftmp;
1271
1272 spin_lock_bh(&adapter->mac_vlan_list_lock);
1273 /* clear the sync flag on all filters */
1274 __dev_uc_unsync(adapter->netdev, NULL);
1275 __dev_mc_unsync(adapter->netdev, NULL);
1276
1277 /* remove all MAC filters */
1278 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
1279 list) {
1280 if (f->add) {
1281 list_del(&f->list);
1282 kfree(f);
1283 } else {
1284 f->remove = true;
1285 }
1286 }
1287
1288 /* remove all VLAN filters */
1289 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
1290 list) {
1291 if (vlf->add) {
1292 list_del(&vlf->list);
1293 kfree(vlf);
1294 } else {
1295 vlf->remove = true;
1296 }
1297 }
1298 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1299 }
1300
1301 /**
1302 * iavf_clear_cloud_filters - Remove cloud filters not yet sent to the PF and
1303 * mark the remaining filters to be removed.
1304 * @adapter: board private structure
1305 **/
1306 static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
1307 {
1308 struct iavf_cloud_filter *cf, *cftmp;
1309
1310 /* remove all cloud filters */
1311 spin_lock_bh(&adapter->cloud_filter_list_lock);
1312 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
1313 list) {
1314 if (cf->add) {
1315 list_del(&cf->list);
1316 kfree(cf);
1317 adapter->num_cloud_filters--;
1318 } else {
1319 cf->del = true;
1320 }
1321 }
1322 spin_unlock_bh(&adapter->cloud_filter_list_lock);
1323 }
1324
1325 /**
1326 * iavf_clear_fdir_filters - Remove Flow Director filters not yet sent to the
1327 * PF and mark the remaining filters to be removed.
1328 * @adapter: board private structure
1329 **/
1330 static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
1331 {
1332 struct iavf_fdir_fltr *fdir, *fdirtmp;
1333
1334 /* remove all Flow Director filters */
1335 spin_lock_bh(&adapter->fdir_fltr_lock);
1336 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
1337 list) {
1338 if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1339 list_del(&fdir->list);
1340 kfree(fdir);
1341 adapter->fdir_active_fltr--;
1342 } else {
1343 fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
1344 }
1345 }
1346 spin_unlock_bh(&adapter->fdir_fltr_lock);
1347 }
1348
1349 /**
1350 * iavf_clear_adv_rss_conf - Remove advanced RSS configurations not yet sent to
1351 * the PF and mark the remaining ones to be removed.
1352 * @adapter: board private structure
1353 **/
1354 static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
1355 {
1356 struct iavf_adv_rss *rss, *rsstmp;
1357
1358 /* remove all advanced RSS configurations */
1359 spin_lock_bh(&adapter->adv_rss_lock);
1360 list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
1361 list) {
1362 if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1363 list_del(&rss->list);
1364 kfree(rss);
1365 } else {
1366 rss->state = IAVF_ADV_RSS_DEL_REQUEST;
1367 }
1368 }
1369 spin_unlock_bh(&adapter->adv_rss_lock);
1370 }
1371
1372 /**
1373 * iavf_down - Shutdown the connection processing
1374 * @adapter: board private structure
1375 *
1376 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
1377 **/
1378 void iavf_down(struct iavf_adapter *adapter)
1379 {
1380 struct net_device *netdev = adapter->netdev;
1381
1382 if (adapter->state <= __IAVF_DOWN_PENDING)
1383 return;
1384
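/* Quiesce the data path: carrier off, Tx stopped, NAPI and IRQs disabled */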
1385 netif_carrier_off(netdev);
1386 netif_tx_disable(netdev);
1387 adapter->link_up = false;
1388 iavf_napi_disable_all(adapter);
1389 iavf_irq_disable(adapter);
1390
1391 iavf_clear_mac_vlan_filters(adapter);
1392 iavf_clear_cloud_filters(adapter);
1393 iavf_clear_fdir_filters(adapter);
1394 iavf_clear_adv_rss_conf(adapter);
1395
1396 if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
1397 /* cancel any current operation */
1398 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1399 /* Schedule operations to close down the HW. Don't wait
1400 * here for this to complete. The watchdog is still running
1401 * and it will take care of this.
1402 */
1403 if (!list_empty(&adapter->mac_filter_list))
1404 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1405 if (!list_empty(&adapter->vlan_filter_list))
1406 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
1407 if (!list_empty(&adapter->cloud_filter_list))
1408 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1409 if (!list_empty(&adapter->fdir_list_head))
1410 adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1411 if (!list_empty(&adapter->adv_rss_list_head))
1412 adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1413 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
1414 }
1415
1416 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1417 }
1418
1419 /**
1420 * iavf_acquire_msix_vectors - Setup the MSIX capability
1421 * @adapter: board private structure
1422 * @vectors: number of vectors to request
1423 *
1424 * Work with the OS to set up the MSIX vectors needed.
1425 *
1426 * Returns 0 on success, negative on failure
1427 **/
1428 static int
1429 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
1430 {
1431 int err, vector_threshold;
1432
1433 /* We'll want at least 3 (vector_threshold):
1434 * 0) Other (Admin Queue and link, mostly)
1435 * 1) TxQ[0] Cleanup
1436 * 2) RxQ[0] Cleanup
1437 */
1438 vector_threshold = MIN_MSIX_COUNT;
1439
1440 /* The more we get, the more we will assign to Tx/Rx Cleanup
1441 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1442 * Right now, we simply care about how many we'll get; we'll
1443 * set them up later while requesting irq's.
1444 */
1445 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1446 vector_threshold, vectors);
1447 if (err < 0) {
1448 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1449 kfree(adapter->msix_entries);
1450 adapter->msix_entries = NULL;
1451 return err;
1452 }
1453
1454 /* Adjust for only the vectors we'll use, which is minimum
1455 * of max_msix_q_vectors + NONQ_VECS, or the number of
1456 * vectors we were allocated.
1457 */
1458 adapter->num_msix_vectors = err;
1459 return 0;
1460 }
1461
1462 /**
1463 * iavf_free_queues - Free memory for all rings
1464 * @adapter: board private structure to initialize
1465 *
1466 * Free all of the memory associated with queue pairs.
1467 **/
1468 static void iavf_free_queues(struct iavf_adapter *adapter)
1469 {
1470 if (!adapter->vsi_res)
1471 return;
1472 adapter->num_active_queues = 0;
1473 kfree(adapter->tx_rings);
1474 adapter->tx_rings = NULL;
1475 kfree(adapter->rx_rings);
1476 adapter->rx_rings = NULL;
1477 }
1478
1479 /**
1480 * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
1481 * @adapter: board private structure
1482 *
1483 * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
1484 * stripped in certain descriptor fields. Instead of checking the offload
1485 * capability bits in the hot path, cache the location the ring specific
1486 * flags.
1487 */
1488 void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
1489 {
1490 int i;
1491
1492 for (i = 0; i < adapter->num_active_queues; i++) {
1493 struct iavf_ring *tx_ring = &adapter->tx_rings[i];
1494 struct iavf_ring *rx_ring = &adapter->rx_rings[i];
1495
1496 /* prevent multiple L2TAG bits being set after VFR */
1497 tx_ring->flags &=
1498 ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1499 IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
1500 rx_ring->flags &=
1501 ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1502 IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
1503
1504 if (VLAN_ALLOWED(adapter)) {
1505 tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1506 rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1507 } else if (VLAN_V2_ALLOWED(adapter)) {
1508 struct virtchnl_vlan_supported_caps *stripping_support;
1509 struct virtchnl_vlan_supported_caps *insertion_support;
1510
1511 stripping_support =
1512 &adapter->vlan_v2_caps.offloads.stripping_support;
1513 insertion_support =
1514 &adapter->vlan_v2_caps.offloads.insertion_support;
1515
1516 if (stripping_support->outer) {
1517 if (stripping_support->outer &
1518 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1519 rx_ring->flags |=
1520 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1521 else if (stripping_support->outer &
1522 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1523 rx_ring->flags |=
1524 IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1525 } else if (stripping_support->inner) {
1526 if (stripping_support->inner &
1527 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1528 rx_ring->flags |=
1529 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1530 else if (stripping_support->inner &
1531 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1532 rx_ring->flags |=
1533 IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1534 }
1535
1536 if (insertion_support->outer) {
1537 if (insertion_support->outer &
1538 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1539 tx_ring->flags |=
1540 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1541 else if (insertion_support->outer &
1542 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1543 tx_ring->flags |=
1544 IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1545 } else if (insertion_support->inner) {
1546 if (insertion_support->inner &
1547 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1548 tx_ring->flags |=
1549 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1550 else if (insertion_support->inner &
1551 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1552 tx_ring->flags |=
1553 IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1554 }
1555 }
1556 }
1557 }
1558
1559 /**
1560 * iavf_alloc_queues - Allocate memory for all rings
1561 * @adapter: board private structure to initialize
1562 *
1563 * We allocate one ring per queue at run-time since we don't know the
1564 * number of queues at compile-time. The polling_netdev array is
1565 * intended for Multiqueue, but should work fine with a single queue.
1566 **/
1567 static int iavf_alloc_queues(struct iavf_adapter *adapter)
1568 {
1569 int i, num_active_queues;
1570
1571 /* If we're reallocating queues during a reset, we don't know for certain
1572 * that the PF gave us the number of queues we asked for, but we'll
1573 * assume it did. Once the basic reset is finished, we'll confirm this
1574 * when we start negotiating the configuration with the PF.
1575 */
1576 if (adapter->num_req_queues)
1577 num_active_queues = adapter->num_req_queues;
1578 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1579 adapter->num_tc)
1580 num_active_queues = adapter->ch_config.total_qps;
1581 else
1582 num_active_queues = min_t(int,
1583 adapter->vsi_res->num_queue_pairs,
1584 (int)(num_online_cpus()));
1585
1586
1587 adapter->tx_rings = kcalloc(num_active_queues,
1588 sizeof(struct iavf_ring), GFP_KERNEL);
1589 if (!adapter->tx_rings)
1590 goto err_out;
1591 adapter->rx_rings = kcalloc(num_active_queues,
1592 sizeof(struct iavf_ring), GFP_KERNEL);
1593 if (!adapter->rx_rings)
1594 goto err_out;
1595
1596 for (i = 0; i < num_active_queues; i++) {
1597 struct iavf_ring *tx_ring;
1598 struct iavf_ring *rx_ring;
1599
1600 tx_ring = &adapter->tx_rings[i];
1601
1602 tx_ring->queue_index = i;
1603 tx_ring->netdev = adapter->netdev;
1604 tx_ring->dev = &adapter->pdev->dev;
1605 tx_ring->count = adapter->tx_desc_count;
1606 tx_ring->itr_setting = IAVF_ITR_TX_DEF;
1607 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
1608 tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
1609
1610 rx_ring = &adapter->rx_rings[i];
1611 rx_ring->queue_index = i;
1612 rx_ring->netdev = adapter->netdev;
1613 rx_ring->dev = &adapter->pdev->dev;
1614 rx_ring->count = adapter->rx_desc_count;
1615 rx_ring->itr_setting = IAVF_ITR_RX_DEF;
1616 }
1617
1618 adapter->num_active_queues = num_active_queues;
1619
1620 iavf_set_queue_vlan_tag_loc(adapter);
1621
1622 return 0;
1623
1624 err_out:
1625 iavf_free_queues(adapter);
1626 return -ENOMEM;
1627 }
1628
1629 /**
1630 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
1631 * @adapter: board private structure to initialize
1632 *
1633 * Attempt to configure the interrupts using the best available
1634 * capabilities of the hardware and the kernel.
1635 **/
1636 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
1637 {
1638 int vector, v_budget;
1639 int pairs = 0;
1640 int err = 0;
1641
1642 if (!adapter->vsi_res) {
1643 err = -EIO;
1644 goto out;
1645 }
1646 pairs = adapter->num_active_queues;
1647
1648 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
1649 * us much good if we have more vectors than CPUs. However, we already
1650 * limit the total number of queues by the number of CPUs so we do not
1651 * need any further limiting here.
1652 */
1653 v_budget = min_t(int, pairs + NONQ_VECS,
1654 (int)adapter->vf_res->max_vectors);
1655
1656 adapter->msix_entries = kcalloc(v_budget,
1657 sizeof(struct msix_entry), GFP_KERNEL);
1658 if (!adapter->msix_entries) {
1659 err = -ENOMEM;
1660 goto out;
1661 }
1662
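/* Fill in the vector indices that pci_enable_msix_range() expects */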
1663 for (vector = 0; vector < v_budget; vector++)
1664 adapter->msix_entries[vector].entry = vector;
1665
1666 err = iavf_acquire_msix_vectors(adapter, v_budget);
1667
1668 out:
1669 netif_set_real_num_rx_queues(adapter->netdev, pairs);
1670 netif_set_real_num_tx_queues(adapter->netdev, pairs);
1671 return err;
1672 }
1673
1674 /**
1675 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
1676 * @adapter: board private structure
1677 *
1678 * Return 0 on success, negative on failure
1679 **/
1680 static int iavf_config_rss_aq(struct iavf_adapter *adapter)
1681 {
1682 struct iavf_aqc_get_set_rss_key_data *rss_key =
1683 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1684 struct iavf_hw *hw = &adapter->hw;
1685 enum iavf_status status;
1686
1687 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1688 /* bail because we already have a command pending */
1689 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1690 adapter->current_op);
1691 return -EBUSY;
1692 }
1693
1694 status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1695 if (status) {
1696 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1697 iavf_stat_str(hw, status),
1698 iavf_aq_str(hw, hw->aq.asq_last_status));
1699 return iavf_status_to_errno(status);
1700
1701 }
1702
1703 status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1704 adapter->rss_lut, adapter->rss_lut_size);
1705 if (status) {
1706 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1707 iavf_stat_str(hw, status),
1708 iavf_aq_str(hw, hw->aq.asq_last_status));
1709 return iavf_status_to_errno(status);
1710 }
1711
1712 return 0;
1713
1714 }
1715
1716 /**
1717 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
1718 * @adapter: board private structure
1719 *
1720 * Returns 0 on success, negative on failure
1721 **/
1722 static int iavf_config_rss_reg(struct iavf_adapter *adapter)
1723 {
1724 struct iavf_hw *hw = &adapter->hw;
1725 u32 *dw;
1726 u16 i;
1727
1728 dw = (u32 *)adapter->rss_key;
1729 for (i = 0; i <= adapter->rss_key_size / 4; i++)
1730 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
1731
1732 dw = (u32 *)adapter->rss_lut;
1733 for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1734 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
1735
1736 iavf_flush(hw);
1737
1738 return 0;
1739 }
1740
1741 /**
1742 * iavf_config_rss - Configure RSS keys and lut
1743 * @adapter: board private structure
1744 *
1745 * Returns 0 on success, negative on failure
1746 **/
1747 int iavf_config_rss(struct iavf_adapter *adapter)
1748 {
1749
1750 if (RSS_PF(adapter)) {
1751 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
1752 IAVF_FLAG_AQ_SET_RSS_KEY;
1753 return 0;
1754 } else if (RSS_AQ(adapter)) {
1755 return iavf_config_rss_aq(adapter);
1756 } else {
1757 return iavf_config_rss_reg(adapter);
1758 }
1759 }
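
/* Usage sketch (hypothetical caller, mirroring the flow in iavf_init_rss()
 * below): the key and LUT buffers are filled first, and iavf_config_rss()
 * then picks the PF / admin-queue / register path by itself, e.g.
 *
 *	iavf_fill_rss_lut(adapter);
 *	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
 *	err = iavf_config_rss(adapter);
 *	if (err)
 *		dev_warn(&adapter->pdev->dev, "RSS config failed: %d\n", err);
 *
 * The dev_warn() here is only for illustration; real callers handle the
 * error as appropriate for their context.
 */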
1760
1761 /**
1762 * iavf_fill_rss_lut - Fill the lut with default values
1763 * @adapter: board private structure
1764 **/
1765 static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
1766 {
1767 u16 i;
1768
1769 for (i = 0; i < adapter->rss_lut_size; i++)
1770 adapter->rss_lut[i] = i % adapter->num_active_queues;
1771 }
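
/* Example of the default LUT produced above (illustrative): with
 * num_active_queues == 4 and rss_lut_size == 16 the table becomes
 *
 *	0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3
 *
 * i.e. receive flows are spread round-robin across all active queues.
 */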
1772
1773 /**
1774 * iavf_init_rss - Prepare for RSS
1775 * @adapter: board private structure
1776 *
1777 * Return 0 on success, negative on failure
1778 **/
1779 static int iavf_init_rss(struct iavf_adapter *adapter)
1780 {
1781 struct iavf_hw *hw = &adapter->hw;
1782
1783 if (!RSS_PF(adapter)) {
1784 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1785 if (adapter->vf_res->vf_cap_flags &
1786 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1787 adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
1788 else
1789 adapter->hena = IAVF_DEFAULT_RSS_HENA;
1790
1791 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
1792 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1793 }
1794
1795 iavf_fill_rss_lut(adapter);
1796 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1797
1798 return iavf_config_rss(adapter);
1799 }
1800
1801 /**
1802 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
1803 * @adapter: board private structure to initialize
1804 *
1805 * We allocate one q_vector per queue interrupt. If allocation fails we
1806 * return -ENOMEM.
1807 **/
1808 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
1809 {
1810 int q_idx = 0, num_q_vectors;
1811 struct iavf_q_vector *q_vector;
1812
1813 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1814 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1815 GFP_KERNEL);
1816 if (!adapter->q_vectors)
1817 return -ENOMEM;
1818
1819 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1820 q_vector = &adapter->q_vectors[q_idx];
1821 q_vector->adapter = adapter;
1822 q_vector->vsi = &adapter->vsi;
1823 q_vector->v_idx = q_idx;
1824 q_vector->reg_idx = q_idx;
1825 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
1826 netif_napi_add(adapter->netdev, &q_vector->napi,
1827 iavf_napi_poll);
1828 }
1829
1830 return 0;
1831 }
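
/* Example (illustrative, assuming NONQ_VECS is 1 for the misc/admin-queue
 * vector): if iavf_set_interrupt_capability() obtained 5 MSI-X vectors,
 * 4 q_vectors are allocated here, one per traffic interrupt, each initially
 * affine to every possible CPU and registered with NAPI.
 */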
1832
1833 /**
1834 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
1835 * @adapter: board private structure to initialize
1836 *
1837 * This function frees the memory allocated to the q_vectors. In addition if
1838 * NAPI is enabled it will delete any references to the NAPI struct prior
1839 * to freeing the q_vector.
1840 **/
1841 static void iavf_free_q_vectors(struct iavf_adapter *adapter)
1842 {
1843 int q_idx, num_q_vectors;
1844 int napi_vectors;
1845
1846 if (!adapter->q_vectors)
1847 return;
1848
1849 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1850 napi_vectors = adapter->num_active_queues;
1851
1852 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1853 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
1854
1855 if (q_idx < napi_vectors)
1856 netif_napi_del(&q_vector->napi);
1857 }
1858 kfree(adapter->q_vectors);
1859 adapter->q_vectors = NULL;
1860 }
1861
1862 /**
1863 * iavf_reset_interrupt_capability - Reset MSIX setup
1864 * @adapter: board private structure
1865 *
1866 **/
1867 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
1868 {
1869 if (!adapter->msix_entries)
1870 return;
1871
1872 pci_disable_msix(adapter->pdev);
1873 kfree(adapter->msix_entries);
1874 adapter->msix_entries = NULL;
1875 }
1876
1877 /**
1878 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
1879 * @adapter: board private structure to initialize
1880 *
1881 **/
1882 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
1883 {
1884 int err;
1885
1886 err = iavf_alloc_queues(adapter);
1887 if (err) {
1888 dev_err(&adapter->pdev->dev,
1889 "Unable to allocate memory for queues\n");
1890 goto err_alloc_queues;
1891 }
1892
1893 rtnl_lock();
1894 err = iavf_set_interrupt_capability(adapter);
1895 rtnl_unlock();
1896 if (err) {
1897 dev_err(&adapter->pdev->dev,
1898 "Unable to setup interrupt capabilities\n");
1899 goto err_set_interrupt;
1900 }
1901
1902 err = iavf_alloc_q_vectors(adapter);
1903 if (err) {
1904 dev_err(&adapter->pdev->dev,
1905 "Unable to allocate memory for queue vectors\n");
1906 goto err_alloc_q_vectors;
1907 }
1908
1909 /* If we've made it this far with the ADq flag on, then we haven't
1910 * bailed out anywhere along the way. ADq isn't merely enabled: its
1911 * resources have actually been allocated in the reset path.
1912 * Only now can we truly claim that ADq is enabled.
1913 */
1914 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1915 adapter->num_tc)
1916 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
1917 adapter->num_tc);
1918
1919 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1920 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1921 adapter->num_active_queues);
1922
1923 return 0;
1924 err_alloc_q_vectors:
1925 iavf_reset_interrupt_capability(adapter);
1926 err_set_interrupt:
1927 iavf_free_queues(adapter);
1928 err_alloc_queues:
1929 return err;
1930 }
1931
1932 /**
1933 * iavf_free_rss - Free memory used by RSS structs
1934 * @adapter: board private structure
1935 **/
1936 static void iavf_free_rss(struct iavf_adapter *adapter)
1937 {
1938 kfree(adapter->rss_key);
1939 adapter->rss_key = NULL;
1940
1941 kfree(adapter->rss_lut);
1942 adapter->rss_lut = NULL;
1943 }
1944
1945 /**
1946 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
1947 * @adapter: board private structure
1948 *
1949 * Returns 0 on success, negative on failure
1950 **/
1951 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
1952 {
1953 struct net_device *netdev = adapter->netdev;
1954 int err;
1955
1956 if (netif_running(netdev))
1957 iavf_free_traffic_irqs(adapter);
1958 iavf_free_misc_irq(adapter);
1959 iavf_reset_interrupt_capability(adapter);
1960 iavf_free_q_vectors(adapter);
1961 iavf_free_queues(adapter);
1962
1963 err = iavf_init_interrupt_scheme(adapter);
1964 if (err)
1965 goto err;
1966
1967 netif_tx_stop_all_queues(netdev);
1968
1969 err = iavf_request_misc_irq(adapter);
1970 if (err)
1971 goto err;
1972
1973 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1974
1975 iavf_map_rings_to_vectors(adapter);
1976 err:
1977 return err;
1978 }
1979
1980 /**
1981 * iavf_process_aq_command - process aq_required flags
1982 * and send the corresponding aq command
1983 * @adapter: pointer to iavf adapter structure
1984 *
1985 * Returns 0 on success
1986 * Returns a negative error code if no command was sent
1987 * or if the command failed.
1988 **/
1989 static int iavf_process_aq_command(struct iavf_adapter *adapter)
1990 {
1991 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
1992 return iavf_send_vf_config_msg(adapter);
1993 if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
1994 return iavf_send_vf_offload_vlan_v2_msg(adapter);
1995 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
1996 iavf_disable_queues(adapter);
1997 return 0;
1998 }
1999
2000 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
2001 iavf_map_queues(adapter);
2002 return 0;
2003 }
2004
2005 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
2006 iavf_add_ether_addrs(adapter);
2007 return 0;
2008 }
2009
2010 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
2011 iavf_add_vlans(adapter);
2012 return 0;
2013 }
2014
2015 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
2016 iavf_del_ether_addrs(adapter);
2017 return 0;
2018 }
2019
2020 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
2021 iavf_del_vlans(adapter);
2022 return 0;
2023 }
2024
2025 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
2026 iavf_enable_vlan_stripping(adapter);
2027 return 0;
2028 }
2029
2030 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
2031 iavf_disable_vlan_stripping(adapter);
2032 return 0;
2033 }
2034
2035 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
2036 iavf_configure_queues(adapter);
2037 return 0;
2038 }
2039
2040 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
2041 iavf_enable_queues(adapter);
2042 return 0;
2043 }
2044
2045 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
2046 /* This message goes straight to the firmware, not the
2047 * PF, so we don't have to set current_op as we will
2048 * not get a response through the ARQ.
2049 */
2050 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
2051 return 0;
2052 }
2053 if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
2054 iavf_get_hena(adapter);
2055 return 0;
2056 }
2057 if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
2058 iavf_set_hena(adapter);
2059 return 0;
2060 }
2061 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
2062 iavf_set_rss_key(adapter);
2063 return 0;
2064 }
2065 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
2066 iavf_set_rss_lut(adapter);
2067 return 0;
2068 }
2069
2070 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
2071 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
2072 FLAG_VF_MULTICAST_PROMISC);
2073 return 0;
2074 }
2075
2076 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
2077 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
2078 return 0;
2079 }
2080 if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
2081 (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
2082 iavf_set_promiscuous(adapter, 0);
2083 return 0;
2084 }
2085
2086 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
2087 iavf_enable_channels(adapter);
2088 return 0;
2089 }
2090
2091 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
2092 iavf_disable_channels(adapter);
2093 return 0;
2094 }
2095 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2096 iavf_add_cloud_filter(adapter);
2097 return 0;
2098 }
2099
2100 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2101 iavf_del_cloud_filter(adapter);
2102 return 0;
2103 }
2112 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
2113 iavf_add_fdir_filter(adapter);
2114 return IAVF_SUCCESS;
2115 }
2116 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
2117 iavf_del_fdir_filter(adapter);
2118 return IAVF_SUCCESS;
2119 }
2120 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
2121 iavf_add_adv_rss_cfg(adapter);
2122 return 0;
2123 }
2124 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
2125 iavf_del_adv_rss_cfg(adapter);
2126 return 0;
2127 }
2128 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
2129 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2130 return 0;
2131 }
2132 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
2133 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2134 return 0;
2135 }
2136 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
2137 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2138 return 0;
2139 }
2140 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
2141 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2142 return 0;
2143 }
2144 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
2145 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2146 return 0;
2147 }
2148 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
2149 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2150 return 0;
2151 }
2152 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
2153 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2154 return 0;
2155 }
2156 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
2157 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2158 return 0;
2159 }
2160
2161 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
2162 iavf_request_stats(adapter);
2163 return 0;
2164 }
2165
2166 return -EAGAIN;
2167 }
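
/* Producer-side sketch (illustrative, mirroring existing callers such as
 * iavf_set_vlan_offload_features()): other paths typically do not call the
 * virtchnl helpers directly, they set a flag and kick the watchdog, e.g.
 *
 *	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
 *	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 *
 * The watchdog then calls iavf_process_aq_command(), which handles at most
 * one pending flag per invocation and returns -EAGAIN once nothing is left.
 */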
2168
2169 /**
2170 * iavf_set_vlan_offload_features - set VLAN offload configuration
2171 * @adapter: board private structure
2172 * @prev_features: previous features used for comparison
2173 * @features: updated features used for configuration
2174 *
2175 * Set the aq_required bit(s) based on the requested features passed in to
2176 * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
2177 * the watchdog if any changes are requested to expedite the request via
2178 * virtchnl.
2179 **/
2180 void
2181 iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
2182 netdev_features_t prev_features,
2183 netdev_features_t features)
2184 {
2185 bool enable_stripping = true, enable_insertion = true;
2186 u16 vlan_ethertype = 0;
2187 u64 aq_required = 0;
2188
2189 /* keep cases separate because one ethertype for offloads can be
2190 * disabled at the same time as another is disabled, so check for an
2191 * enabled ethertype first, then check for disabled. Default to
2192 * ETH_P_8021Q so an ethertype is specified if disabling insertion and
2193 * stripping.
2194 */
2195 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2196 vlan_ethertype = ETH_P_8021AD;
2197 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2198 vlan_ethertype = ETH_P_8021Q;
2199 else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2200 vlan_ethertype = ETH_P_8021AD;
2201 else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2202 vlan_ethertype = ETH_P_8021Q;
2203 else
2204 vlan_ethertype = ETH_P_8021Q;
2205
2206 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
2207 enable_stripping = false;
2208 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
2209 enable_insertion = false;
2210
2211 if (VLAN_ALLOWED(adapter)) {
2212 /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
2213 * stripping via virtchnl. VLAN insertion can be toggled on the
2214 * netdev, but it doesn't require a virtchnl message
2215 */
2216 if (enable_stripping)
2217 aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
2218 else
2219 aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
2220
2221 } else if (VLAN_V2_ALLOWED(adapter)) {
2222 switch (vlan_ethertype) {
2223 case ETH_P_8021Q:
2224 if (enable_stripping)
2225 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
2226 else
2227 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
2228
2229 if (enable_insertion)
2230 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
2231 else
2232 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
2233 break;
2234 case ETH_P_8021AD:
2235 if (enable_stripping)
2236 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
2237 else
2238 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
2239
2240 if (enable_insertion)
2241 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
2242 else
2243 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
2244 break;
2245 }
2246 }
2247
2248 if (aq_required) {
2249 adapter->aq_required |= aq_required;
2250 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
2251 }
2252 }
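
/* Worked example (illustrative): on a VLAN_V2 capable VF, turning off CTAG
 * stripping while keeping CTAG insertion, i.e.
 *
 *	prev_features = NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX
 *	features      = NETIF_F_HW_VLAN_CTAG_TX
 *
 * resolves vlan_ethertype to ETH_P_8021Q and queues
 * IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING together with
 * IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION before kicking the watchdog.
 */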
2253
2254 /**
2255 * iavf_startup - first step of driver startup
2256 * @adapter: board private structure
2257 *
2258 * Function processes the __IAVF_STARTUP driver state.
2259 * On success the state is changed to __IAVF_INIT_VERSION_CHECK,
2260 * on failure the state is changed to __IAVF_INIT_FAILED.
2261 **/
2262 static void iavf_startup(struct iavf_adapter *adapter)
2263 {
2264 struct pci_dev *pdev = adapter->pdev;
2265 struct iavf_hw *hw = &adapter->hw;
2266 enum iavf_status status;
2267 int ret;
2268
2269 WARN_ON(adapter->state != __IAVF_STARTUP);
2270
2271 /* driver loaded, probe complete */
2272 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2273 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2274 status = iavf_set_mac_type(hw);
2275 if (status) {
2276 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
2277 goto err;
2278 }
2279
2280 ret = iavf_check_reset_complete(hw);
2281 if (ret) {
2282 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2283 ret);
2284 goto err;
2285 }
2286 hw->aq.num_arq_entries = IAVF_AQ_LEN;
2287 hw->aq.num_asq_entries = IAVF_AQ_LEN;
2288 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2289 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2290
2291 status = iavf_init_adminq(hw);
2292 if (status) {
2293 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2294 status);
2295 goto err;
2296 }
2297 ret = iavf_send_api_ver(adapter);
2298 if (ret) {
2299 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
2300 iavf_shutdown_adminq(hw);
2301 goto err;
2302 }
2303 iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
2304 return;
2305 err:
2306 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2307 }
2308
2309 /**
2310 * iavf_init_version_check - second step of driver startup
2311 * @adapter: board private structure
2312 *
2313 * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
2314 * On success the state is changed to __IAVF_INIT_GET_RESOURCES,
2315 * on failure the state is changed to __IAVF_INIT_FAILED.
2316 **/
2317 static void iavf_init_version_check(struct iavf_adapter *adapter)
2318 {
2319 struct pci_dev *pdev = adapter->pdev;
2320 struct iavf_hw *hw = &adapter->hw;
2321 int err = -EAGAIN;
2322
2323 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
2324
2325 if (!iavf_asq_done(hw)) {
2326 dev_err(&pdev->dev, "Admin queue command never completed\n");
2327 iavf_shutdown_adminq(hw);
2328 iavf_change_state(adapter, __IAVF_STARTUP);
2329 goto err;
2330 }
2331
2332 /* aq msg sent, awaiting reply */
2333 err = iavf_verify_api_ver(adapter);
2334 if (err) {
2335 if (err == -EALREADY)
2336 err = iavf_send_api_ver(adapter);
2337 else
2338 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2339 adapter->pf_version.major,
2340 adapter->pf_version.minor,
2341 VIRTCHNL_VERSION_MAJOR,
2342 VIRTCHNL_VERSION_MINOR);
2343 goto err;
2344 }
2345 err = iavf_send_vf_config_msg(adapter);
2346 if (err) {
2347 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2348 err);
2349 goto err;
2350 }
2351 iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
2352 return;
2353 err:
2354 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2355 }
2356
2357 /**
2358 * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
2359 * @adapter: board private structure
2360 */
2361 int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
2362 {
2363 int i, num_req_queues = adapter->num_req_queues;
2364 struct iavf_vsi *vsi = &adapter->vsi;
2365
2366 for (i = 0; i < adapter->vf_res->num_vsis; i++) {
2367 if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
2368 adapter->vsi_res = &adapter->vf_res->vsi_res[i];
2369 }
2370 if (!adapter->vsi_res) {
2371 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2372 return -ENODEV;
2373 }
2374
2375 if (num_req_queues &&
2376 num_req_queues > adapter->vsi_res->num_queue_pairs) {
2377 /* Problem. The PF gave us fewer queues than what we had
2378 * negotiated in our request. Need a reset to see if we can
2379 * get back to a working state.
2380 */
2381 dev_err(&adapter->pdev->dev,
2382 "Requested %d queues, but PF only gave us %d.\n",
2383 num_req_queues,
2384 adapter->vsi_res->num_queue_pairs);
2385 adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
2386 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
2387 iavf_schedule_reset(adapter);
2388
2389 return -EAGAIN;
2390 }
2391 adapter->num_req_queues = 0;
2392 adapter->vsi.id = adapter->vsi_res->vsi_id;
2393
2394 adapter->vsi.back = adapter;
2395 adapter->vsi.base_vector = 1;
2396 vsi->netdev = adapter->netdev;
2397 vsi->qs_handle = adapter->vsi_res->qset_handle;
2398 if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2399 adapter->rss_key_size = adapter->vf_res->rss_key_size;
2400 adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
2401 } else {
2402 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
2403 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
2404 }
2405
2406 return 0;
2407 }
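
/* Example of the queue mismatch handling above (illustrative): if the VF
 * requested 16 queues (num_req_queues == 16) but the PF only granted 8
 * queue pairs, the function logs the discrepancy, caps num_req_queues at 8,
 * sets IAVF_FLAG_REINIT_MSIX_NEEDED, schedules a reset and returns -EAGAIN
 * so that initialization is retried with the smaller count.
 */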
2408
2409 /**
2410 * iavf_init_get_resources - third step of driver startup
2411 * @adapter: board private structure
2412 *
2413 * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
2414 * finishes the driver initialization procedure.
2415 * On success the state is changed to __IAVF_DOWN,
2416 * on failure the state is changed to __IAVF_INIT_FAILED.
2417 **/
2418 static void iavf_init_get_resources(struct iavf_adapter *adapter)
2419 {
2420 struct pci_dev *pdev = adapter->pdev;
2421 struct iavf_hw *hw = &adapter->hw;
2422 int err;
2423
2424 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
2425 /* aq msg sent, awaiting reply */
2426 if (!adapter->vf_res) {
2427 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
2428 GFP_KERNEL);
2429 if (!adapter->vf_res) {
2430 err = -ENOMEM;
2431 goto err;
2432 }
2433 }
2434 err = iavf_get_vf_config(adapter);
2435 if (err == -EALREADY) {
2436 err = iavf_send_vf_config_msg(adapter);
2437 goto err;
2438 } else if (err == -EINVAL) {
2439 /* We only get -EINVAL if the device is in a very bad
2440 * state or if we've been disabled for previous bad
2441 * behavior. Either way, we're done now.
2442 */
2443 iavf_shutdown_adminq(hw);
2444 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
2445 return;
2446 }
2447 if (err) {
2448 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
2449 goto err_alloc;
2450 }
2451
2452 err = iavf_parse_vf_resource_msg(adapter);
2453 if (err) {
2454 dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
2455 err);
2456 goto err_alloc;
2457 }
2458 /* Some features require additional messages to negotiate extended
2459 * capabilities. These are processed in sequence by the
2460 * __IAVF_INIT_EXTENDED_CAPS driver state.
2461 */
2462 adapter->extended_caps = IAVF_EXTENDED_CAPS;
2463
2464 iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
2465 return;
2466
2467 err_alloc:
2468 kfree(adapter->vf_res);
2469 adapter->vf_res = NULL;
2470 err:
2471 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2472 }
2473
2474 /**
2475 * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2476 * @adapter: board private structure
2477 *
2478 * Function processes send of the extended VLAN V2 capability message to the
2479 * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
2480 * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2481 */
2482 static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2483 {
2484 int ret;
2485
2486 WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
2487
2488 ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
2489 if (ret && ret == -EOPNOTSUPP) {
2490 /* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this case,
2491 * we did not send the capability exchange message and do not
2492 * expect a response.
2493 */
2494 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2495 }
2496
2497 /* We sent the message, so move on to the next step */
2498 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2499 }
2500
2501 /**
2502 * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2503 * @adapter: board private structure
2504 *
2505 * Function processes receipt of the extended VLAN V2 capability message from
2506 * the PF.
2507 **/
2508 static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2509 {
2510 int ret;
2511
2512 WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2513
2514 memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2515
2516 ret = iavf_get_vf_vlan_v2_caps(adapter);
2517 if (ret)
2518 goto err;
2519
2520 /* We've processed receipt of the VLAN V2 caps message */
2521 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2522 return;
2523 err:
2524 /* We didn't receive a reply. Make sure we try sending again when
2525 * __IAVF_INIT_FAILED attempts to recover.
2526 */
2527 adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2528 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2529 }
2530
2531 /**
2532 * iavf_init_process_extended_caps - Part of driver startup
2533 * @adapter: board private structure
2534 *
2535 * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
2536 * handles negotiating capabilities for features which require an additional
2537 * message.
2538 *
2539 * Once all extended capabilities exchanges are finished, the driver will
2540 * transition into __IAVF_INIT_CONFIG_ADAPTER.
2541 */
2542 static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
2543 {
2544 WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
2545
2546 /* Process capability exchange for VLAN V2 */
2547 if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
2548 iavf_init_send_offload_vlan_v2_caps(adapter);
2549 return;
2550 } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
2551 iavf_init_recv_offload_vlan_v2_caps(adapter);
2552 return;
2553 }
2554
2555 /* When we reach here, no further extended capabilities exchanges are
2556 * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
2557 */
2558 iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
2559 }
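
/* Sketch of the extended caps progression (illustrative): starting from
 * adapter->extended_caps == IAVF_EXTENDED_CAPS, successive watchdog passes
 * clear one step at a time:
 *
 *	SEND_VLAN_V2 set -> send caps request, clear SEND_VLAN_V2
 *	RECV_VLAN_V2 set -> parse caps reply,  clear RECV_VLAN_V2
 *	no bits left     -> __IAVF_INIT_CONFIG_ADAPTER
 *
 * A failure in the receive step re-arms SEND_VLAN_V2 so the exchange is
 * retried via __IAVF_INIT_FAILED.
 */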
2560
2561 /**
2562 * iavf_init_config_adapter - last part of driver startup
2563 * @adapter: board private structure
2564 *
2565 * After all the supported capabilities are negotiated, then the
2566 * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
2567 */
2568 static void iavf_init_config_adapter(struct iavf_adapter *adapter)
2569 {
2570 struct net_device *netdev = adapter->netdev;
2571 struct pci_dev *pdev = adapter->pdev;
2572 int err;
2573
2574 WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
2575
2576 if (iavf_process_config(adapter))
2577 goto err;
2578
2579 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2580
2581 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
2582
2583 netdev->netdev_ops = &iavf_netdev_ops;
2584 iavf_set_ethtool_ops(netdev);
2585 netdev->watchdog_timeo = 5 * HZ;
2586
2587 /* MTU range: 68 - 9710 */
2588 netdev->min_mtu = ETH_MIN_MTU;
2589 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
2590
2591 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2592 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2593 adapter->hw.mac.addr);
2594 eth_hw_addr_random(netdev);
2595 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2596 } else {
2597 eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2598 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2599 }
2600
2601 adapter->tx_desc_count = IAVF_DEFAULT_TXD;
2602 adapter->rx_desc_count = IAVF_DEFAULT_RXD;
2603 err = iavf_init_interrupt_scheme(adapter);
2604 if (err)
2605 goto err_sw_init;
2606 iavf_map_rings_to_vectors(adapter);
2607 if (adapter->vf_res->vf_cap_flags &
2608 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2609 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
2610
2611 err = iavf_request_misc_irq(adapter);
2612 if (err)
2613 goto err_sw_init;
2614
2615 netif_carrier_off(netdev);
2616 adapter->link_up = false;
2617
2618 /* take the rtnl lock to prevent any callbacks after device registration
2619 * until the driver state has been set to __IAVF_DOWN
2620 */
2621 rtnl_lock();
2622 if (!adapter->netdev_registered) {
2623 err = register_netdevice(netdev);
2624 if (err) {
2625 rtnl_unlock();
2626 goto err_register;
2627 }
2628 }
2629
2630 adapter->netdev_registered = true;
2631
2632 netif_tx_stop_all_queues(netdev);
2633 if (CLIENT_ALLOWED(adapter)) {
2634 err = iavf_lan_add_device(adapter);
2635 if (err)
2636 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
2637 err);
2638 }
2639 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2640 if (netdev->features & NETIF_F_GRO)
2641 dev_info(&pdev->dev, "GRO is enabled\n");
2642
2643 iavf_change_state(adapter, __IAVF_DOWN);
2644 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2645 rtnl_unlock();
2646
2647 iavf_misc_irq_enable(adapter);
2648 wake_up(&adapter->down_waitqueue);
2649
2650 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2651 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2652 if (!adapter->rss_key || !adapter->rss_lut) {
2653 err = -ENOMEM;
2654 goto err_mem;
2655 }
2656 if (RSS_AQ(adapter))
2657 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2658 else
2659 iavf_init_rss(adapter);
2660
2661 if (VLAN_V2_ALLOWED(adapter))
2662 /* request initial VLAN offload settings */
2663 iavf_set_vlan_offload_features(adapter, 0, netdev->features);
2664
2665 return;
2666 err_mem:
2667 iavf_free_rss(adapter);
2668 err_register:
2669 iavf_free_misc_irq(adapter);
2670 err_sw_init:
2671 iavf_reset_interrupt_capability(adapter);
2672 err:
2673 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2674 }
2675
2676 /**
2677 * iavf_watchdog_task - Periodic call-back task
2678 * @work: pointer to work_struct
2679 **/
2680 static void iavf_watchdog_task(struct work_struct *work)
2681 {
2682 struct iavf_adapter *adapter = container_of(work,
2683 struct iavf_adapter,
2684 watchdog_task.work);
2685 struct iavf_hw *hw = &adapter->hw;
2686 u32 reg_val;
2687
2688 if (!mutex_trylock(&adapter->crit_lock)) {
2689 if (adapter->state == __IAVF_REMOVE)
2690 return;
2691
2692 goto restart_watchdog;
2693 }
2694
2695 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2696 iavf_change_state(adapter, __IAVF_COMM_FAILED);
2697
2698 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2699 adapter->aq_required = 0;
2700 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2701 mutex_unlock(&adapter->crit_lock);
2702 queue_work(adapter->wq, &adapter->reset_task);
2703 return;
2704 }
2705
2706 switch (adapter->state) {
2707 case __IAVF_STARTUP:
2708 iavf_startup(adapter);
2709 mutex_unlock(&adapter->crit_lock);
2710 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2711 msecs_to_jiffies(30));
2712 return;
2713 case __IAVF_INIT_VERSION_CHECK:
2714 iavf_init_version_check(adapter);
2715 mutex_unlock(&adapter->crit_lock);
2716 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2717 msecs_to_jiffies(30));
2718 return;
2719 case __IAVF_INIT_GET_RESOURCES:
2720 iavf_init_get_resources(adapter);
2721 mutex_unlock(&adapter->crit_lock);
2722 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2723 msecs_to_jiffies(1));
2724 return;
2725 case __IAVF_INIT_EXTENDED_CAPS:
2726 iavf_init_process_extended_caps(adapter);
2727 mutex_unlock(&adapter->crit_lock);
2728 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2729 msecs_to_jiffies(1));
2730 return;
2731 case __IAVF_INIT_CONFIG_ADAPTER:
2732 iavf_init_config_adapter(adapter);
2733 mutex_unlock(&adapter->crit_lock);
2734 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2735 msecs_to_jiffies(1));
2736 return;
2737 case __IAVF_INIT_FAILED:
2738 if (test_bit(__IAVF_IN_REMOVE_TASK,
2739 &adapter->crit_section)) {
2740 /* Do not update the state and do not reschedule the
2741 * watchdog task; iavf_remove should handle this state,
2742 * as rescheduling here could loop forever
2743 */
2744 mutex_unlock(&adapter->crit_lock);
2745 return;
2746 }
2747 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2748 dev_err(&adapter->pdev->dev,
2749 "Failed to communicate with PF; waiting before retry\n");
2750 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2751 iavf_shutdown_adminq(hw);
2752 mutex_unlock(&adapter->crit_lock);
2753 queue_delayed_work(adapter->wq,
2754 &adapter->watchdog_task, (5 * HZ));
2755 return;
2756 }
2757 /* Try again from failed step */
2758 iavf_change_state(adapter, adapter->last_state);
2759 mutex_unlock(&adapter->crit_lock);
2760 queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
2761 return;
2762 case __IAVF_COMM_FAILED:
2763 if (test_bit(__IAVF_IN_REMOVE_TASK,
2764 &adapter->crit_section)) {
2765 /* Set state to __IAVF_INIT_FAILED and perform remove
2766 * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
2767 * doesn't bring the state back to __IAVF_COMM_FAILED.
2768 */
2769 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2770 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2771 mutex_unlock(&adapter->crit_lock);
2772 return;
2773 }
2774 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2775 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2776 if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2777 reg_val == VIRTCHNL_VFR_COMPLETED) {
2778 /* A chance for redemption! */
2779 dev_err(&adapter->pdev->dev,
2780 "Hardware came out of reset. Attempting reinit.\n");
2781 /* When init task contacts the PF and
2782 * gets everything set up again, it'll restart the
2783 * watchdog for us. Down, boy. Sit. Stay. Woof.
2784 */
2785 iavf_change_state(adapter, __IAVF_STARTUP);
2786 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2787 }
2788 adapter->aq_required = 0;
2789 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2790 mutex_unlock(&adapter->crit_lock);
2791 queue_delayed_work(adapter->wq,
2792 &adapter->watchdog_task,
2793 msecs_to_jiffies(10));
2794 return;
2795 case __IAVF_RESETTING:
2796 mutex_unlock(&adapter->crit_lock);
2797 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2798 HZ * 2);
2799 return;
2800 case __IAVF_DOWN:
2801 case __IAVF_DOWN_PENDING:
2802 case __IAVF_TESTING:
2803 case __IAVF_RUNNING:
2804 if (adapter->current_op) {
2805 if (!iavf_asq_done(hw)) {
2806 dev_dbg(&adapter->pdev->dev,
2807 "Admin queue timeout\n");
2808 iavf_send_api_ver(adapter);
2809 }
2810 } else {
2811 int ret = iavf_process_aq_command(adapter);
2812
2813 /* An error will be returned if no commands were
2814 * processed; use this opportunity to update stats
2815 * if the error isn't -EOPNOTSUPP
2816 */
2817 if (ret && ret != -EOPNOTSUPP &&
2818 adapter->state == __IAVF_RUNNING)
2819 iavf_request_stats(adapter);
2820 }
2821 if (adapter->state == __IAVF_RUNNING)
2822 iavf_detect_recover_hung(&adapter->vsi);
2823 break;
2824 case __IAVF_REMOVE:
2825 default:
2826 mutex_unlock(&adapter->crit_lock);
2827 return;
2828 }
2829
2830 /* check for hw reset */
2831 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2832 if (!reg_val) {
2833 adapter->flags |= IAVF_FLAG_RESET_PENDING;
2834 adapter->aq_required = 0;
2835 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2836 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2837 queue_work(adapter->wq, &adapter->reset_task);
2838 mutex_unlock(&adapter->crit_lock);
2839 queue_delayed_work(adapter->wq,
2840 &adapter->watchdog_task, HZ * 2);
2841 return;
2842 }
2843
2844 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
2845 mutex_unlock(&adapter->crit_lock);
2846 restart_watchdog:
2847 if (adapter->state >= __IAVF_DOWN)
2848 queue_work(adapter->wq, &adapter->adminq_task);
2849 if (adapter->aq_required)
2850 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2851 msecs_to_jiffies(20));
2852 else
2853 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2854 HZ * 2);
2855 }
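
/* Requeue cadence used above (summary derived from the switch cases): the
 * early init states poll every 1-30 ms, __IAVF_INIT_FAILED retries after
 * 1 s (backing off to 5 s once IAVF_AQ_MAX_ERR attempts have failed),
 * __IAVF_COMM_FAILED re-checks VFGEN_RSTAT every 10 ms, and the steady
 * states requeue every 2 s, dropping to 20 ms while aq_required still has
 * work pending.
 */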
2856
2857 /**
2858 * iavf_disable_vf - disable VF
2859 * @adapter: board private structure
2860 *
2861 * Set communication failed flag and free all resources.
2862 * NOTE: This function is expected to be called with crit_lock being held.
2863 **/
2864 static void iavf_disable_vf(struct iavf_adapter *adapter)
2865 {
2866 struct iavf_mac_filter *f, *ftmp;
2867 struct iavf_vlan_filter *fv, *fvtmp;
2868 struct iavf_cloud_filter *cf, *cftmp;
2869
2870 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2871
2872 /* We don't use netif_running() because it may be true prior to
2873 * ndo_open() returning, so we can't assume it means all our open
2874 * tasks have finished, since we're not holding the rtnl_lock here.
2875 */
2876 if (adapter->state == __IAVF_RUNNING) {
2877 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2878 netif_carrier_off(adapter->netdev);
2879 netif_tx_disable(adapter->netdev);
2880 adapter->link_up = false;
2881 iavf_napi_disable_all(adapter);
2882 iavf_irq_disable(adapter);
2883 iavf_free_traffic_irqs(adapter);
2884 iavf_free_all_tx_resources(adapter);
2885 iavf_free_all_rx_resources(adapter);
2886 }
2887
2888 spin_lock_bh(&adapter->mac_vlan_list_lock);
2889
2890 /* Delete all of the filters */
2891 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2892 list_del(&f->list);
2893 kfree(f);
2894 }
2895
2896 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2897 list_del(&fv->list);
2898 kfree(fv);
2899 }
2900
2901 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2902
2903 spin_lock_bh(&adapter->cloud_filter_list_lock);
2904 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2905 list_del(&cf->list);
2906 kfree(cf);
2907 adapter->num_cloud_filters--;
2908 }
2909 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2910
2911 iavf_free_misc_irq(adapter);
2912 iavf_reset_interrupt_capability(adapter);
2913 iavf_free_q_vectors(adapter);
2914 iavf_free_queues(adapter);
2915 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2916 iavf_shutdown_adminq(&adapter->hw);
2917 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2918 iavf_change_state(adapter, __IAVF_DOWN);
2919 wake_up(&adapter->down_waitqueue);
2920 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2921 }
2922
2923 /**
2924 * iavf_reset_task - Call-back task to handle hardware reset
2925 * @work: pointer to work_struct
2926 *
2927 * During reset we need to shut down and reinitialize the admin queue
2928 * before we can use it to communicate with the PF again. We also clear
2929 * and reinit the rings because that context is lost as well.
2930 **/
2931 static void iavf_reset_task(struct work_struct *work)
2932 {
2933 struct iavf_adapter *adapter = container_of(work,
2934 struct iavf_adapter,
2935 reset_task);
2936 struct virtchnl_vf_resource *vfres = adapter->vf_res;
2937 struct net_device *netdev = adapter->netdev;
2938 struct iavf_hw *hw = &adapter->hw;
2939 struct iavf_mac_filter *f, *ftmp;
2940 struct iavf_cloud_filter *cf;
2941 enum iavf_status status;
2942 u32 reg_val;
2943 int i = 0, err;
2944 bool running;
2945
2946 /* Detach interface to avoid subsequent NDO callbacks */
2947 rtnl_lock();
2948 netif_device_detach(netdev);
2949 rtnl_unlock();
2950
2951 /* When the device is being removed it doesn't make sense to run the
2952 * reset task; just return in that case.
2953 */
2954 if (!mutex_trylock(&adapter->crit_lock)) {
2955 if (adapter->state != __IAVF_REMOVE)
2956 queue_work(adapter->wq, &adapter->reset_task);
2957
2958 goto reset_finish;
2959 }
2960
2961 while (!mutex_trylock(&adapter->client_lock))
2962 usleep_range(500, 1000);
2963 if (CLIENT_ENABLED(adapter)) {
2964 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2965 IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2966 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2967 IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2968 cancel_delayed_work_sync(&adapter->client_task);
2969 iavf_notify_client_close(&adapter->vsi, true);
2970 }
2971 iavf_misc_irq_disable(adapter);
2972 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2973 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2974 /* Restart the AQ here. If we have been reset but didn't
2975 * detect it, or if the PF had to reinit, our AQ will be hosed.
2976 */
2977 iavf_shutdown_adminq(hw);
2978 iavf_init_adminq(hw);
2979 iavf_request_reset(adapter);
2980 }
2981 adapter->flags |= IAVF_FLAG_RESET_PENDING;
2982
2983 /* poll until we see the reset actually happen */
2984 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2985 reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2986 IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2987 if (!reg_val)
2988 break;
2989 usleep_range(5000, 10000);
2990 }
2991 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
2992 dev_info(&adapter->pdev->dev, "Never saw reset\n");
2993 goto continue_reset; /* act like the reset happened */
2994 }
2995
2996 /* wait until the reset is complete and the PF is responding to us */
2997 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
2998 /* sleep first to make sure a minimum wait time is met */
2999 msleep(IAVF_RESET_WAIT_MS);
3000
3001 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
3002 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3003 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
3004 break;
3005 }
3006
3007 pci_set_master(adapter->pdev);
3008 pci_restore_msi_state(adapter->pdev);
3009
3010 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
3011 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
3012 reg_val);
3013 iavf_disable_vf(adapter);
3014 mutex_unlock(&adapter->client_lock);
3015 mutex_unlock(&adapter->crit_lock);
3016 if (netif_running(netdev)) {
3017 rtnl_lock();
3018 dev_close(netdev);
3019 rtnl_unlock();
3020 }
3021 return; /* Do not attempt to reinit. It's dead, Jim. */
3022 }
3023
3024 continue_reset:
3025 /* We don't use netif_running() because it may be true prior to
3026 * ndo_open() returning, so we can't assume it means all our open
3027 * tasks have finished, since we're not holding the rtnl_lock here.
3028 */
3029 running = adapter->state == __IAVF_RUNNING;
3030
3031 if (running) {
3032 netif_carrier_off(netdev);
3033 netif_tx_stop_all_queues(netdev);
3034 adapter->link_up = false;
3035 iavf_napi_disable_all(adapter);
3036 }
3037 iavf_irq_disable(adapter);
3038
3039 iavf_change_state(adapter, __IAVF_RESETTING);
3040 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3041
3042 /* free the Tx/Rx rings and descriptors, might be better to just
3043 * re-use them sometime in the future
3044 */
3045 iavf_free_all_rx_resources(adapter);
3046 iavf_free_all_tx_resources(adapter);
3047
3048 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
3049 /* kill and reinit the admin queue */
3050 iavf_shutdown_adminq(hw);
3051 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3052 status = iavf_init_adminq(hw);
3053 if (status) {
3054 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
3055 status);
3056 goto reset_err;
3057 }
3058 adapter->aq_required = 0;
3059
3060 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3061 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3062 err = iavf_reinit_interrupt_scheme(adapter);
3063 if (err)
3064 goto reset_err;
3065 }
3066
3067 if (RSS_AQ(adapter)) {
3068 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
3069 } else {
3070 err = iavf_init_rss(adapter);
3071 if (err)
3072 goto reset_err;
3073 }
3074
3075 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
3076 /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
3077 * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here;
3078 * however, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
3079 * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
3080 * been successfully sent and negotiated
3081 */
3082 adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
3083 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
3084
3085 spin_lock_bh(&adapter->mac_vlan_list_lock);
3086
3087 /* Delete the filter for the current MAC address; it could have
3088 * been changed by the PF via an administratively set MAC.
3089 * It will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
3090 */
3091 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3092 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
3093 list_del(&f->list);
3094 kfree(f);
3095 }
3096 }
3097 /* re-add all MAC filters */
3098 list_for_each_entry(f, &adapter->mac_filter_list, list) {
3099 f->add = true;
3100 }
3101 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3102
3103 /* check if TCs are running and re-add all cloud filters */
3104 spin_lock_bh(&adapter->cloud_filter_list_lock);
3105 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
3106 adapter->num_tc) {
3107 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
3108 cf->add = true;
3109 }
3110 }
3111 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3112
3113 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
3114 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3115 iavf_misc_irq_enable(adapter);
3116
3117 bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
3118 bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
3119
3120 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
3121
3122 /* We were running when the reset started, so we need to restore some
3123 * state here.
3124 */
3125 if (running) {
3126 /* allocate transmit descriptors */
3127 err = iavf_setup_all_tx_resources(adapter);
3128 if (err)
3129 goto reset_err;
3130
3131 /* allocate receive descriptors */
3132 err = iavf_setup_all_rx_resources(adapter);
3133 if (err)
3134 goto reset_err;
3135
3136 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3137 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3138 err = iavf_request_traffic_irqs(adapter, netdev->name);
3139 if (err)
3140 goto reset_err;
3141
3142 adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
3143 }
3144
3145 iavf_configure(adapter);
3146
3147 /* iavf_up_complete() will switch device back
3148 * to __IAVF_RUNNING
3149 */
3150 iavf_up_complete(adapter);
3151
3152 iavf_irq_enable(adapter, true);
3153 } else {
3154 iavf_change_state(adapter, __IAVF_DOWN);
3155 wake_up(&adapter->down_waitqueue);
3156 }
3157
3158 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3159
3160 mutex_unlock(&adapter->client_lock);
3161 mutex_unlock(&adapter->crit_lock);
3162
3163 goto reset_finish;
3164 reset_err:
3165 if (running) {
3166 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3167 iavf_free_traffic_irqs(adapter);
3168 }
3169 iavf_disable_vf(adapter);
3170
3171 mutex_unlock(&adapter->client_lock);
3172 mutex_unlock(&adapter->crit_lock);
3173
3174 if (netif_running(netdev)) {
3175 /* Close device to ensure that Tx queues will not be started
3176 * during netif_device_attach() at the end of the reset task.
3177 */
3178 rtnl_lock();
3179 dev_close(netdev);
3180 rtnl_unlock();
3181 }
3182
3183 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
3184 reset_finish:
3185 rtnl_lock();
3186 netif_device_attach(netdev);
3187 rtnl_unlock();
3188 }
3189
3190 /**
3191 * iavf_adminq_task - worker thread to clean the admin queue
3192 * @work: pointer to work_struct containing our data
3193 **/
3194 static void iavf_adminq_task(struct work_struct *work)
3195 {
3196 struct iavf_adapter *adapter =
3197 container_of(work, struct iavf_adapter, adminq_task);
3198 struct iavf_hw *hw = &adapter->hw;
3199 struct iavf_arq_event_info event;
3200 enum virtchnl_ops v_op;
3201 enum iavf_status ret, v_ret;
3202 u32 val, oldval;
3203 u16 pending;
3204
3205 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
3206 goto out;
3207
3208 if (!mutex_trylock(&adapter->crit_lock)) {
3209 if (adapter->state == __IAVF_REMOVE)
3210 return;
3211
3212 queue_work(adapter->wq, &adapter->adminq_task);
3213 goto out;
3214 }
3215
3216 event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
3217 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
3218 if (!event.msg_buf)
3219 goto out;
3220
3221 do {
3222 ret = iavf_clean_arq_element(hw, &event, &pending);
3223 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
3224 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
3225
3226 if (ret || !v_op)
3227 break; /* No event to process or error cleaning ARQ */
3228
3229 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
3230 event.msg_len);
3231 if (pending != 0)
3232 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
3233 } while (pending);
3234 mutex_unlock(&adapter->crit_lock);
3235
3236 if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
3237 if (adapter->netdev_registered ||
3238 !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
3239 struct net_device *netdev = adapter->netdev;
3240
3241 rtnl_lock();
3242 netdev_update_features(netdev);
3243 rtnl_unlock();
3244 /* Request VLAN offload settings */
3245 if (VLAN_V2_ALLOWED(adapter))
3246 iavf_set_vlan_offload_features
3247 (adapter, 0, netdev->features);
3248
3249 iavf_set_queue_vlan_tag_loc(adapter);
3250 }
3251
3252 adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
3253 }
3254 if ((adapter->flags &
3255 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
3256 adapter->state == __IAVF_RESETTING)
3257 goto freedom;
3258
3259 /* check for error indications */
3260 val = rd32(hw, hw->aq.arq.len);
3261 if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
3262 goto freedom;
3263 oldval = val;
3264 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
3265 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
3266 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
3267 }
3268 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
3269 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
3270 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
3271 }
3272 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
3273 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
3274 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
3275 }
3276 if (oldval != val)
3277 wr32(hw, hw->aq.arq.len, val);
3278
3279 val = rd32(hw, hw->aq.asq.len);
3280 oldval = val;
3281 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
3282 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
3283 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
3284 }
3285 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
3286 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
3287 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
3288 }
3289 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
3290 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
3291 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
3292 }
3293 if (oldval != val)
3294 wr32(hw, hw->aq.asq.len, val);
3295
3296 freedom:
3297 kfree(event.msg_buf);
3298 out:
3299 /* re-enable Admin queue interrupt cause */
3300 iavf_misc_irq_enable(adapter);
3301 }
3302
3303 /**
3304 * iavf_client_task - worker thread to perform client work
3305 * @work: pointer to work_struct containing our data
3306 *
3307 * This task handles client interactions. Because client calls can be
3308 * reentrant, we can't handle them in the watchdog.
3309 **/
3310 static void iavf_client_task(struct work_struct *work)
3311 {
3312 struct iavf_adapter *adapter =
3313 container_of(work, struct iavf_adapter, client_task.work);
3314
3315 /* If we can't get the client bit, just give up. We'll be rescheduled
3316 * later.
3317 */
3318
3319 if (!mutex_trylock(&adapter->client_lock))
3320 return;
3321
3322 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
3323 iavf_client_subtask(adapter);
3324 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3325 goto out;
3326 }
3327 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
3328 iavf_notify_client_l2_params(&adapter->vsi);
3329 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
3330 goto out;
3331 }
3332 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
3333 iavf_notify_client_close(&adapter->vsi, false);
3334 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3335 goto out;
3336 }
3337 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
3338 iavf_notify_client_open(&adapter->vsi);
3339 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
3340 }
3341 out:
3342 mutex_unlock(&adapter->client_lock);
3343 }
3344
3345 /**
3346 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
3347 * @adapter: board private structure
3348 *
3349 * Free all transmit software resources
3350 **/
3351 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
3352 {
3353 int i;
3354
3355 if (!adapter->tx_rings)
3356 return;
3357
3358 for (i = 0; i < adapter->num_active_queues; i++)
3359 if (adapter->tx_rings[i].desc)
3360 iavf_free_tx_resources(&adapter->tx_rings[i]);
3361 }
3362
3363 /**
3364 * iavf_setup_all_tx_resources - allocate all queues Tx resources
3365 * @adapter: board private structure
3366 *
3367 * If this function returns with an error, then it's possible one or
3368 * more of the rings is populated (while the rest are not). It is the
3369 * caller's duty to clean up those orphaned rings.
3370 *
3371 * Return 0 on success, negative on failure
3372 **/
3373 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
3374 {
3375 int i, err = 0;
3376
3377 for (i = 0; i < adapter->num_active_queues; i++) {
3378 adapter->tx_rings[i].count = adapter->tx_desc_count;
3379 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
3380 if (!err)
3381 continue;
3382 dev_err(&adapter->pdev->dev,
3383 "Allocation for Tx Queue %u failed\n", i);
3384 break;
3385 }
3386
3387 return err;
3388 }
3389
3390 /**
3391 * iavf_setup_all_rx_resources - allocate all queues Rx resources
3392 * @adapter: board private structure
3393 *
3394 * If this function returns with an error, then it's possible one or
3395 * more of the rings is populated (while the rest are not). It is the
3396 * caller's duty to clean up those orphaned rings.
3397 *
3398 * Return 0 on success, negative on failure
3399 **/
3400 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
3401 {
3402 int i, err = 0;
3403
3404 for (i = 0; i < adapter->num_active_queues; i++) {
3405 adapter->rx_rings[i].count = adapter->rx_desc_count;
3406 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
3407 if (!err)
3408 continue;
3409 dev_err(&adapter->pdev->dev,
3410 "Allocation for Rx Queue %u failed\n", i);
3411 break;
3412 }
3413 return err;
3414 }
3415
3416 /**
3417 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
3418 * @adapter: board private structure
3419 *
3420 * Free all receive software resources
3421 **/
3422 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
3423 {
3424 int i;
3425
3426 if (!adapter->rx_rings)
3427 return;
3428
3429 for (i = 0; i < adapter->num_active_queues; i++)
3430 if (adapter->rx_rings[i].desc)
3431 iavf_free_rx_resources(&adapter->rx_rings[i]);
3432 }
3433
3434 /**
3435 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
3436 * @adapter: board private structure
3437 * @max_tx_rate: max Tx bw for a tc
3438 **/
3439 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
3440 u64 max_tx_rate)
3441 {
3442 int speed = 0, ret = 0;
3443
3444 if (ADV_LINK_SUPPORT(adapter)) {
3445 if (adapter->link_speed_mbps < U32_MAX) {
3446 speed = adapter->link_speed_mbps;
3447 goto validate_bw;
3448 } else {
3449 dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3450 return -EINVAL;
3451 }
3452 }
3453
3454 switch (adapter->link_speed) {
3455 case VIRTCHNL_LINK_SPEED_40GB:
3456 speed = SPEED_40000;
3457 break;
3458 case VIRTCHNL_LINK_SPEED_25GB:
3459 speed = SPEED_25000;
3460 break;
3461 case VIRTCHNL_LINK_SPEED_20GB:
3462 speed = SPEED_20000;
3463 break;
3464 case VIRTCHNL_LINK_SPEED_10GB:
3465 speed = SPEED_10000;
3466 break;
3467 case VIRTCHNL_LINK_SPEED_5GB:
3468 speed = SPEED_5000;
3469 break;
3470 case VIRTCHNL_LINK_SPEED_2_5GB:
3471 speed = SPEED_2500;
3472 break;
3473 case VIRTCHNL_LINK_SPEED_1GB:
3474 speed = SPEED_1000;
3475 break;
3476 case VIRTCHNL_LINK_SPEED_100MB:
3477 speed = SPEED_100;
3478 break;
3479 default:
3480 break;
3481 }
3482
3483 validate_bw:
3484 if (max_tx_rate > speed) {
3485 dev_err(&adapter->pdev->dev,
3486 "Invalid tx rate specified\n");
3487 ret = -EINVAL;
3488 }
3489
3490 return ret;
3491 }
3492
3493 /**
3494 * iavf_validate_ch_config - validate queue mapping info
3495 * @adapter: board private structure
3496 * @mqprio_qopt: queue parameters
3497 *
3498  * This function checks whether the config provided by the user to
3499  * configure queue channels is valid. Returns 0 on a valid
3500  * config.
3501 **/
3502 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
3503 struct tc_mqprio_qopt_offload *mqprio_qopt)
3504 {
3505 u64 total_max_rate = 0;
3506 u32 tx_rate_rem = 0;
3507 int i, num_qps = 0;
3508 u64 tx_rate = 0;
3509 int ret = 0;
3510
3511 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
3512 mqprio_qopt->qopt.num_tc < 1)
3513 return -EINVAL;
3514
3515 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
3516 if (!mqprio_qopt->qopt.count[i] ||
3517 mqprio_qopt->qopt.offset[i] != num_qps)
3518 return -EINVAL;
3519 if (mqprio_qopt->min_rate[i]) {
3520 dev_err(&adapter->pdev->dev,
3521 "Invalid min tx rate (greater than 0) specified for TC%d\n",
3522 i);
3523 return -EINVAL;
3524 }
3525
3526 /* convert to Mbps */
3527 tx_rate = div_u64(mqprio_qopt->max_rate[i],
3528 IAVF_MBPS_DIVISOR);
3529
3530 if (mqprio_qopt->max_rate[i] &&
3531 tx_rate < IAVF_MBPS_QUANTA) {
3532 dev_err(&adapter->pdev->dev,
3533 "Invalid max tx rate for TC%d, minimum %dMbps\n",
3534 i, IAVF_MBPS_QUANTA);
3535 return -EINVAL;
3536 }
3537
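/* the max Tx rate must also be an even multiple of IAVF_MBPS_QUANTA */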
3538 (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
3539
3540 if (tx_rate_rem != 0) {
3541 dev_err(&adapter->pdev->dev,
3542 "Invalid max tx rate for TC%d, not divisible by %d\n",
3543 i, IAVF_MBPS_QUANTA);
3544 return -EINVAL;
3545 }
3546
3547 total_max_rate += tx_rate;
3548 num_qps += mqprio_qopt->qopt.count[i];
3549 }
3550 if (num_qps > adapter->num_active_queues) {
3551 dev_err(&adapter->pdev->dev,
3552 "Cannot support requested number of queues\n");
3553 return -EINVAL;
3554 }
3555
3556 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
3557 return ret;
3558 }
3559
3560 /**
3561 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
3562 * @adapter: board private structure
3563 **/
3564 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
3565 {
3566 struct iavf_cloud_filter *cf, *cftmp;
3567
3568 spin_lock_bh(&adapter->cloud_filter_list_lock);
3569 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
3570 list) {
3571 list_del(&cf->list);
3572 kfree(cf);
3573 adapter->num_cloud_filters--;
3574 }
3575 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3576 }
3577
3578 /**
3579 * __iavf_setup_tc - configure multiple traffic classes
3580 * @netdev: network interface device structure
3581 * @type_data: tc offload data
3582 *
3583 * This function processes the config information provided by the
3584 * user to configure traffic classes/queue channels and packages the
3585  * information to request the PF to set up traffic classes.
3586 *
3587 * Returns 0 on success.
3588 **/
3589 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
3590 {
3591 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
3592 struct iavf_adapter *adapter = netdev_priv(netdev);
3593 struct virtchnl_vf_resource *vfres = adapter->vf_res;
3594 u8 num_tc = 0, total_qps = 0;
3595 int ret = 0, netdev_tc = 0;
3596 u64 max_tx_rate;
3597 u16 mode;
3598 int i;
3599
3600 num_tc = mqprio_qopt->qopt.num_tc;
3601 mode = mqprio_qopt->mode;
3602
3603 /* delete queue_channel */
3604 if (!mqprio_qopt->qopt.hw) {
3605 if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
3606 /* reset the tc configuration */
3607 netdev_reset_tc(netdev);
3608 adapter->num_tc = 0;
3609 netif_tx_stop_all_queues(netdev);
3610 netif_tx_disable(netdev);
3611 iavf_del_all_cloud_filters(adapter);
3612 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
3613 total_qps = adapter->orig_num_active_queues;
3614 goto exit;
3615 } else {
3616 return -EINVAL;
3617 }
3618 }
3619
3620 /* add queue channel */
3621 if (mode == TC_MQPRIO_MODE_CHANNEL) {
3622 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3623 dev_err(&adapter->pdev->dev, "ADq not supported\n");
3624 return -EOPNOTSUPP;
3625 }
3626 if (adapter->ch_config.state != __IAVF_TC_INVALID) {
3627 dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
3628 return -EINVAL;
3629 }
3630
3631 ret = iavf_validate_ch_config(adapter, mqprio_qopt);
3632 if (ret)
3633 return ret;
3634 /* Return if same TC config is requested */
3635 if (adapter->num_tc == num_tc)
3636 return 0;
3637 adapter->num_tc = num_tc;
3638
3639 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3640 if (i < num_tc) {
3641 adapter->ch_config.ch_info[i].count =
3642 mqprio_qopt->qopt.count[i];
3643 adapter->ch_config.ch_info[i].offset =
3644 mqprio_qopt->qopt.offset[i];
3645 total_qps += mqprio_qopt->qopt.count[i];
3646 max_tx_rate = mqprio_qopt->max_rate[i];
3647 /* convert to Mbps */
3648 max_tx_rate = div_u64(max_tx_rate,
3649 IAVF_MBPS_DIVISOR);
3650 adapter->ch_config.ch_info[i].max_tx_rate =
3651 max_tx_rate;
3652 } else {
3653 adapter->ch_config.ch_info[i].count = 1;
3654 adapter->ch_config.ch_info[i].offset = 0;
3655 }
3656 }
3657
3658 /* Take a snapshot of the original config, such as "num_active_queues".
3659  * It is used later when the delete ADQ flow is exercised, so that
3660  * once that flow completes, the VF goes back to its
3661  * original queue configuration
3662 */
3663
3664 adapter->orig_num_active_queues = adapter->num_active_queues;
3665
3666 /* Store queue info based on TC so that the VF gets configured
3667  * with the correct number of queues when it completes the ADQ
3668  * config flow
3669 */
3670 adapter->ch_config.total_qps = total_qps;
3671
3672 netif_tx_stop_all_queues(netdev);
3673 netif_tx_disable(netdev);
3674 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
3675 netdev_reset_tc(netdev);
3676 /* Report the tc mapping up the stack */
3677 netdev_set_num_tc(adapter->netdev, num_tc);
3678 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3679 u16 qcount = mqprio_qopt->qopt.count[i];
3680 u16 qoffset = mqprio_qopt->qopt.offset[i];
3681
3682 if (i < num_tc)
3683 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
3684 qoffset);
3685 }
3686 }
3687 exit:
3688 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
3689 return 0;
3690
3691 netif_set_real_num_rx_queues(netdev, total_qps);
3692 netif_set_real_num_tx_queues(netdev, total_qps);
3693
3694 return ret;
3695 }
3696
3697 /**
3698 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
3699 * @adapter: board private structure
3700 * @f: pointer to struct flow_cls_offload
3701 * @filter: pointer to cloud filter structure
3702 */
3703 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3704 struct flow_cls_offload *f,
3705 struct iavf_cloud_filter *filter)
3706 {
3707 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3708 struct flow_dissector *dissector = rule->match.dissector;
3709 u16 n_proto_mask = 0;
3710 u16 n_proto_key = 0;
3711 u8 field_flags = 0;
3712 u16 addr_type = 0;
3713 u16 n_proto = 0;
3714 int i = 0;
3715 struct virtchnl_filter *vf = &filter->f;
3716
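/* reject filters that match on dissector keys this VF cannot offload */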
3717 if (dissector->used_keys &
3718 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
3719 BIT(FLOW_DISSECTOR_KEY_BASIC) |
3720 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
3721 BIT(FLOW_DISSECTOR_KEY_VLAN) |
3722 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
3723 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
3724 BIT(FLOW_DISSECTOR_KEY_PORTS) |
3725 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
3726 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
3727 dissector->used_keys);
3728 return -EOPNOTSUPP;
3729 }
3730
3731 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
3732 struct flow_match_enc_keyid match;
3733
3734 flow_rule_match_enc_keyid(rule, &match);
3735 if (match.mask->keyid != 0)
3736 field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
3737 }
3738
3739 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
3740 struct flow_match_basic match;
3741
3742 flow_rule_match_basic(rule, &match);
3743 n_proto_key = ntohs(match.key->n_proto);
3744 n_proto_mask = ntohs(match.mask->n_proto);
3745
3746 if (n_proto_key == ETH_P_ALL) {
3747 n_proto_key = 0;
3748 n_proto_mask = 0;
3749 }
3750 n_proto = n_proto_key & n_proto_mask;
3751 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
3752 return -EINVAL;
3753 if (n_proto == ETH_P_IPV6) {
3754 /* specify flow type as TCP IPv6 */
3755 vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
3756 }
3757
3758 if (match.key->ip_proto != IPPROTO_TCP) {
3759 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
3760 return -EINVAL;
3761 }
3762 }
3763
3764 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
3765 struct flow_match_eth_addrs match;
3766
3767 flow_rule_match_eth_addrs(rule, &match);
3768
3769 /* use is_broadcast and is_zero to check for all 0xff or all 0 */
3770 if (!is_zero_ether_addr(match.mask->dst)) {
3771 if (is_broadcast_ether_addr(match.mask->dst)) {
3772 field_flags |= IAVF_CLOUD_FIELD_OMAC;
3773 } else {
3774 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
3775 match.mask->dst);
3776 return -EINVAL;
3777 }
3778 }
3779
3780 if (!is_zero_ether_addr(match.mask->src)) {
3781 if (is_broadcast_ether_addr(match.mask->src)) {
3782 field_flags |= IAVF_CLOUD_FIELD_IMAC;
3783 } else {
3784 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
3785 match.mask->src);
3786 return -EINVAL;
3787 }
3788 }
3789
3790 if (!is_zero_ether_addr(match.key->dst))
3791 if (is_valid_ether_addr(match.key->dst) ||
3792 is_multicast_ether_addr(match.key->dst)) {
3793 /* set the mask if a valid dst_mac address */
3794 for (i = 0; i < ETH_ALEN; i++)
3795 vf->mask.tcp_spec.dst_mac[i] |= 0xff;
3796 ether_addr_copy(vf->data.tcp_spec.dst_mac,
3797 match.key->dst);
3798 }
3799
3800 if (!is_zero_ether_addr(match.key->src))
3801 if (is_valid_ether_addr(match.key->src) ||
3802 is_multicast_ether_addr(match.key->src)) {
3803 /* set the mask if a valid src_mac address */
3804 for (i = 0; i < ETH_ALEN; i++)
3805 vf->mask.tcp_spec.src_mac[i] |= 0xff;
3806 ether_addr_copy(vf->data.tcp_spec.src_mac,
3807 match.key->src);
3808 }
3809 }
3810
3811 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
3812 struct flow_match_vlan match;
3813
3814 flow_rule_match_vlan(rule, &match);
3815 if (match.mask->vlan_id) {
3816 if (match.mask->vlan_id == VLAN_VID_MASK) {
3817 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
3818 } else {
3819 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
3820 match.mask->vlan_id);
3821 return -EINVAL;
3822 }
3823 }
3824 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
3825 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
3826 }
3827
3828 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
3829 struct flow_match_control match;
3830
3831 flow_rule_match_control(rule, &match);
3832 addr_type = match.key->addr_type;
3833 }
3834
3835 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3836 struct flow_match_ipv4_addrs match;
3837
3838 flow_rule_match_ipv4_addrs(rule, &match);
3839 if (match.mask->dst) {
3840 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
3841 field_flags |= IAVF_CLOUD_FIELD_IIP;
3842 } else {
3843 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
3844 be32_to_cpu(match.mask->dst));
3845 return -EINVAL;
3846 }
3847 }
3848
3849 if (match.mask->src) {
3850 if (match.mask->src == cpu_to_be32(0xffffffff)) {
3851 field_flags |= IAVF_CLOUD_FIELD_IIP;
3852 } else {
3853 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
3854 be32_to_cpu(match.mask->src));
3855 return -EINVAL;
3856 }
3857 }
3858
3859 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
3860 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
3861 return -EINVAL;
3862 }
3863 if (match.key->dst) {
3864 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
3865 vf->data.tcp_spec.dst_ip[0] = match.key->dst;
3866 }
3867 if (match.key->src) {
3868 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
3869 vf->data.tcp_spec.src_ip[0] = match.key->src;
3870 }
3871 }
3872
3873 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3874 struct flow_match_ipv6_addrs match;
3875
3876 flow_rule_match_ipv6_addrs(rule, &match);
3877
3878 /* validate mask, make sure it is not IPV6_ADDR_ANY */
3879 if (ipv6_addr_any(&match.mask->dst)) {
3880 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3881 IPV6_ADDR_ANY);
3882 return -EINVAL;
3883 }
3884
3885 /* src and dest IPv6 address should not be LOOPBACK
3886 * (0:0:0:0:0:0:0:1) which can be represented as ::1
3887 */
3888 if (ipv6_addr_loopback(&match.key->dst) ||
3889 ipv6_addr_loopback(&match.key->src)) {
3890 dev_err(&adapter->pdev->dev,
3891 "ipv6 addr should not be loopback\n");
3892 return -EINVAL;
3893 }
3894 if (!ipv6_addr_any(&match.mask->dst) ||
3895 !ipv6_addr_any(&match.mask->src))
3896 field_flags |= IAVF_CLOUD_FIELD_IIP;
3897
3898 for (i = 0; i < 4; i++)
3899 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
3900 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3901 sizeof(vf->data.tcp_spec.dst_ip));
3902 for (i = 0; i < 4; i++)
3903 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3904 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3905 sizeof(vf->data.tcp_spec.src_ip));
3906 }
3907 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3908 struct flow_match_ports match;
3909
3910 flow_rule_match_ports(rule, &match);
3911 if (match.mask->src) {
3912 if (match.mask->src == cpu_to_be16(0xffff)) {
3913 field_flags |= IAVF_CLOUD_FIELD_IIP;
3914 } else {
3915 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3916 be16_to_cpu(match.mask->src));
3917 return -EINVAL;
3918 }
3919 }
3920
3921 if (match.mask->dst) {
3922 if (match.mask->dst == cpu_to_be16(0xffff)) {
3923 field_flags |= IAVF_CLOUD_FIELD_IIP;
3924 } else {
3925 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3926 be16_to_cpu(match.mask->dst));
3927 return -EINVAL;
3928 }
3929 }
3930 if (match.key->dst) {
3931 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3932 vf->data.tcp_spec.dst_port = match.key->dst;
3933 }
3934
3935 if (match.key->src) {
3936 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3937 vf->data.tcp_spec.src_port = match.key->src;
3938 }
3939 }
3940 vf->field_flags = field_flags;
3941
3942 return 0;
3943 }
3944
3945 /**
3946 * iavf_handle_tclass - Forward to a traffic class on the device
3947 * @adapter: board private structure
3948 * @tc: traffic class index on the device
3949 * @filter: pointer to cloud filter structure
3950 */
3951 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
3952 struct iavf_cloud_filter *filter)
3953 {
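/* nothing to do for the default traffic class (TC0) */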
3954 if (tc == 0)
3955 return 0;
3956 if (tc < adapter->num_tc) {
3957 if (!filter->f.data.tcp_spec.dst_port) {
3958 dev_err(&adapter->pdev->dev,
3959 "Specify destination port to redirect to traffic class other than TC0\n");
3960 return -EINVAL;
3961 }
3962 }
3963 /* redirect to a traffic class on the same device */
3964 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3965 filter->f.action_meta = tc;
3966 return 0;
3967 }
3968
3969 /**
3970 * iavf_find_cf - Find the cloud filter in the list
3971 * @adapter: Board private structure
3972 * @cookie: filter specific cookie
3973 *
3974 * Returns ptr to the filter object or NULL. Must be called while holding the
3975 * cloud_filter_list_lock.
3976 */
3977 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3978 unsigned long *cookie)
3979 {
3980 struct iavf_cloud_filter *filter = NULL;
3981
3982 if (!cookie)
3983 return NULL;
3984
3985 list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3986 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3987 return filter;
3988 }
3989 return NULL;
3990 }
3991
3992 /**
3993 * iavf_configure_clsflower - Add tc flower filters
3994 * @adapter: board private structure
3995 * @cls_flower: Pointer to struct flow_cls_offload
3996 */
3997 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
3998 struct flow_cls_offload *cls_flower)
3999 {
4000 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
4001 struct iavf_cloud_filter *filter = NULL;
4002 int err = -EINVAL, count = 50;
4003
4004 if (tc < 0) {
4005 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
4006 return -EINVAL;
4007 }
4008
4009 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
4010 if (!filter)
4011 return -ENOMEM;
4012
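/* briefly poll for the critical lock; give up after ~50 attempts */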
4013 while (!mutex_trylock(&adapter->crit_lock)) {
4014 if (--count == 0) {
4015 kfree(filter);
4016 return err;
4017 }
4018 udelay(1);
4019 }
4020
4021 filter->cookie = cls_flower->cookie;
4022
4023 /* bail out here if filter already exists */
4024 spin_lock_bh(&adapter->cloud_filter_list_lock);
4025 if (iavf_find_cf(adapter, &cls_flower->cookie)) {
4026 dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
4027 err = -EEXIST;
4028 goto spin_unlock;
4029 }
4030 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4031
4032 /* set the mask to all zeroes to begin with */
4033 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
4034 /* start out with flow type and eth type IPv4 */
4035 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
4036 err = iavf_parse_cls_flower(adapter, cls_flower, filter);
4037 if (err)
4038 goto err;
4039
4040 err = iavf_handle_tclass(adapter, tc, filter);
4041 if (err)
4042 goto err;
4043
4044 /* add filter to the list */
4045 spin_lock_bh(&adapter->cloud_filter_list_lock);
4046 list_add_tail(&filter->list, &adapter->cloud_filter_list);
4047 adapter->num_cloud_filters++;
4048 filter->add = true;
4049 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
4050 spin_unlock:
4051 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4052 err:
4053 if (err)
4054 kfree(filter);
4055
4056 mutex_unlock(&adapter->crit_lock);
4057 return err;
4058 }
4059
4060 /**
4061 * iavf_delete_clsflower - Remove tc flower filters
4062 * @adapter: board private structure
4063 * @cls_flower: Pointer to struct flow_cls_offload
4064 */
4065 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
4066 struct flow_cls_offload *cls_flower)
4067 {
4068 struct iavf_cloud_filter *filter = NULL;
4069 int err = 0;
4070
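/* mark the filter for deletion; the request is sent later when the aq_required flag is processed */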
4071 spin_lock_bh(&adapter->cloud_filter_list_lock);
4072 filter = iavf_find_cf(adapter, &cls_flower->cookie);
4073 if (filter) {
4074 filter->del = true;
4075 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
4076 } else {
4077 err = -EINVAL;
4078 }
4079 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4080
4081 return err;
4082 }
4083
4084 /**
4085 * iavf_setup_tc_cls_flower - flower classifier offloads
4086 * @adapter: board private structure
4087 * @cls_flower: pointer to flow_cls_offload struct with flow info
4088 */
4089 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
4090 struct flow_cls_offload *cls_flower)
4091 {
4092 switch (cls_flower->command) {
4093 case FLOW_CLS_REPLACE:
4094 return iavf_configure_clsflower(adapter, cls_flower);
4095 case FLOW_CLS_DESTROY:
4096 return iavf_delete_clsflower(adapter, cls_flower);
4097 case FLOW_CLS_STATS:
4098 return -EOPNOTSUPP;
4099 default:
4100 return -EOPNOTSUPP;
4101 }
4102 }
4103
4104 /**
4105 * iavf_setup_tc_block_cb - block callback for tc
4106 * @type: type of offload
4107 * @type_data: offload data
4108  * @cb_priv: board private structure, passed as the block callback's private data
4109 *
4110 * This function is the block callback for traffic classes
4111 **/
4112 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4113 void *cb_priv)
4114 {
4115 struct iavf_adapter *adapter = cb_priv;
4116
4117 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
4118 return -EOPNOTSUPP;
4119
4120 switch (type) {
4121 case TC_SETUP_CLSFLOWER:
4122 return iavf_setup_tc_cls_flower(cb_priv, type_data);
4123 default:
4124 return -EOPNOTSUPP;
4125 }
4126 }
4127
4128 static LIST_HEAD(iavf_block_cb_list);
4129
4130 /**
4131 * iavf_setup_tc - configure multiple traffic classes
4132 * @netdev: network interface device structure
4133 * @type: type of offload
4134 * @type_data: tc offload data
4135 *
4136 * This function is the callback to ndo_setup_tc in the
4137 * netdev_ops.
4138 *
4139 * Returns 0 on success
4140 **/
4141 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
4142 void *type_data)
4143 {
4144 struct iavf_adapter *adapter = netdev_priv(netdev);
4145
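/* MQPRIO configures ADQ traffic classes; BLOCK registers the flower classifier callback */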
4146 switch (type) {
4147 case TC_SETUP_QDISC_MQPRIO:
4148 return __iavf_setup_tc(netdev, type_data);
4149 case TC_SETUP_BLOCK:
4150 return flow_block_cb_setup_simple(type_data,
4151 &iavf_block_cb_list,
4152 iavf_setup_tc_block_cb,
4153 adapter, adapter, true);
4154 default:
4155 return -EOPNOTSUPP;
4156 }
4157 }
4158
4159 /**
4160 * iavf_open - Called when a network interface is made active
4161 * @netdev: network interface device structure
4162 *
4163 * Returns 0 on success, negative value on failure
4164 *
4165 * The open entry point is called when a network interface is made
4166 * active by the system (IFF_UP). At this point all resources needed
4167 * for transmit and receive operations are allocated, the interrupt
4168 * handler is registered with the OS, the watchdog is started,
4169 * and the stack is notified that the interface is ready.
4170 **/
4171 static int iavf_open(struct net_device *netdev)
4172 {
4173 struct iavf_adapter *adapter = netdev_priv(netdev);
4174 int err;
4175
4176 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
4177 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
4178 return -EIO;
4179 }
4180
4181 while (!mutex_trylock(&adapter->crit_lock)) {
4182 /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
4183 * is already taken and iavf_open is called from an upper
4184  * device's notifier reacting to the NETDEV_REGISTER event.
4185  * We have to bail out here to avoid a deadlock.
4186 */
4187 if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
4188 return -EBUSY;
4189
4190 usleep_range(500, 1000);
4191 }
4192
4193 if (adapter->state != __IAVF_DOWN) {
4194 err = -EBUSY;
4195 goto err_unlock;
4196 }
4197
4198 if (adapter->state == __IAVF_RUNNING &&
4199 !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
4200 dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
4201 err = 0;
4202 goto err_unlock;
4203 }
4204
4205 /* allocate transmit descriptors */
4206 err = iavf_setup_all_tx_resources(adapter);
4207 if (err)
4208 goto err_setup_tx;
4209
4210 /* allocate receive descriptors */
4211 err = iavf_setup_all_rx_resources(adapter);
4212 if (err)
4213 goto err_setup_rx;
4214
4215 /* clear any pending interrupts, may auto mask */
4216 err = iavf_request_traffic_irqs(adapter, netdev->name);
4217 if (err)
4218 goto err_req_irq;
4219
4220 spin_lock_bh(&adapter->mac_vlan_list_lock);
4221
4222 iavf_add_filter(adapter, adapter->hw.mac.addr);
4223
4224 spin_unlock_bh(&adapter->mac_vlan_list_lock);
4225
4226 /* Restore VLAN filters that were removed with IFF_DOWN */
4227 iavf_restore_filters(adapter);
4228
4229 iavf_configure(adapter);
4230
4231 iavf_up_complete(adapter);
4232
4233 iavf_irq_enable(adapter, true);
4234
4235 mutex_unlock(&adapter->crit_lock);
4236
4237 return 0;
4238
4239 err_req_irq:
4240 iavf_down(adapter);
4241 iavf_free_traffic_irqs(adapter);
4242 err_setup_rx:
4243 iavf_free_all_rx_resources(adapter);
4244 err_setup_tx:
4245 iavf_free_all_tx_resources(adapter);
4246 err_unlock:
4247 mutex_unlock(&adapter->crit_lock);
4248
4249 return err;
4250 }
4251
4252 /**
4253 * iavf_close - Disables a network interface
4254 * @netdev: network interface device structure
4255 *
4256 * Returns 0, this is not allowed to fail
4257 *
4258 * The close entry point is called when an interface is de-activated
4259 * by the OS. The hardware is still under the drivers control, but
4260 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
4261 * are freed, along with all transmit and receive resources.
4262 **/
4263 static int iavf_close(struct net_device *netdev)
4264 {
4265 struct iavf_adapter *adapter = netdev_priv(netdev);
4266 u64 aq_to_restore;
4267 int status;
4268
4269 mutex_lock(&adapter->crit_lock);
4270
4271 if (adapter->state <= __IAVF_DOWN_PENDING) {
4272 mutex_unlock(&adapter->crit_lock);
4273 return 0;
4274 }
4275
4276 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
4277 if (CLIENT_ENABLED(adapter))
4278 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
4279 /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
4280  * IAVF_FLAG_AQ_DISABLE_QUEUES because in that case there is an rtnl
4281  * deadlock with adminq_task() until iavf_close times out. We must send
4282  * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES so that
4283  * the VF can still disable its queues. Give only the necessary flags to
4284  * iavf_down and save the others to be set right before iavf_close()
4285  * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES has already been sent and
4286  * iavf is in the DOWN state.
4287 */
4288 aq_to_restore = adapter->aq_required;
4289 adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
4290
4291 /* Remove flags which we do not want to send after close or we want to
4292 * send before disable queues.
4293 */
4294 aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG |
4295 IAVF_FLAG_AQ_ENABLE_QUEUES |
4296 IAVF_FLAG_AQ_CONFIGURE_QUEUES |
4297 IAVF_FLAG_AQ_ADD_VLAN_FILTER |
4298 IAVF_FLAG_AQ_ADD_MAC_FILTER |
4299 IAVF_FLAG_AQ_ADD_CLOUD_FILTER |
4300 IAVF_FLAG_AQ_ADD_FDIR_FILTER |
4301 IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
4302
4303 iavf_down(adapter);
4304 iavf_change_state(adapter, __IAVF_DOWN_PENDING);
4305 iavf_free_traffic_irqs(adapter);
4306
4307 mutex_unlock(&adapter->crit_lock);
4308
4309 /* We explicitly don't free resources here because the hardware is
4310 * still active and can DMA into memory. Resources are cleared in
4311 * iavf_virtchnl_completion() after we get confirmation from the PF
4312 * driver that the rings have been stopped.
4313 *
4314 * Also, we wait for state to transition to __IAVF_DOWN before
4315 * returning. State change occurs in iavf_virtchnl_completion() after
4316 * VF resources are released (which occurs after PF driver processes and
4317 * responds to admin queue commands).
4318 */
4319
4320 status = wait_event_timeout(adapter->down_waitqueue,
4321 adapter->state == __IAVF_DOWN,
4322 msecs_to_jiffies(500));
4323 if (!status)
4324 netdev_warn(netdev, "Device resources not yet released\n");
4325
4326 mutex_lock(&adapter->crit_lock);
4327 adapter->aq_required |= aq_to_restore;
4328 mutex_unlock(&adapter->crit_lock);
4329 return 0;
4330 }
4331
4332 /**
4333 * iavf_change_mtu - Change the Maximum Transfer Unit
4334 * @netdev: network interface device structure
4335 * @new_mtu: new value for maximum frame size
4336 *
4337 * Returns 0 on success, negative on failure
4338 **/
4339 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
4340 {
4341 struct iavf_adapter *adapter = netdev_priv(netdev);
4342
4343 netdev_dbg(netdev, "changing MTU from %d to %d\n",
4344 netdev->mtu, new_mtu);
4345 netdev->mtu = new_mtu;
4346 if (CLIENT_ENABLED(adapter)) {
4347 iavf_notify_client_l2_params(&adapter->vsi);
4348 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
4349 }
4350
4351 if (netif_running(netdev)) {
4352 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
4353 queue_work(adapter->wq, &adapter->reset_task);
4354 }
4355
4356 return 0;
4357 }
4358
4359 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
4360 NETIF_F_HW_VLAN_CTAG_TX | \
4361 NETIF_F_HW_VLAN_STAG_RX | \
4362 NETIF_F_HW_VLAN_STAG_TX)
4363
4364 /**
4365 * iavf_set_features - set the netdev feature flags
4366 * @netdev: ptr to the netdev being adjusted
4367 * @features: the feature set that the stack is suggesting
4368 * Note: expects to be called while under rtnl_lock()
4369 **/
4370 static int iavf_set_features(struct net_device *netdev,
4371 netdev_features_t features)
4372 {
4373 struct iavf_adapter *adapter = netdev_priv(netdev);
4374
4375 /* trigger update on any VLAN feature change */
4376 if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
4377 (features & NETIF_VLAN_OFFLOAD_FEATURES))
4378 iavf_set_vlan_offload_features(adapter, netdev->features,
4379 features);
4380
4381 return 0;
4382 }
4383
4384 /**
4385 * iavf_features_check - Validate encapsulated packet conforms to limits
4386 * @skb: skb buff
4387 * @dev: This physical port's netdev
4388 * @features: Offload features that the stack believes apply
4389 **/
4390 static netdev_features_t iavf_features_check(struct sk_buff *skb,
4391 struct net_device *dev,
4392 netdev_features_t features)
4393 {
4394 size_t len;
4395
4396 /* No point in doing any of this if neither checksum nor GSO are
4397 * being requested for this frame. We can rule out both by just
4398 * checking for CHECKSUM_PARTIAL
4399 */
4400 if (skb->ip_summed != CHECKSUM_PARTIAL)
4401 return features;
4402
4403 /* We cannot support GSO if the MSS is going to be less than
4404  * 64 bytes. If it is, we need to drop support for GSO.
4405 */
4406 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
4407 features &= ~NETIF_F_GSO_MASK;
4408
4409 /* MACLEN can support at most 63 words */
4410 len = skb_network_header(skb) - skb->data;
4411 if (len & ~(63 * 2))
4412 goto out_err;
4413
4414 /* IPLEN and EIPLEN can support at most 127 dwords */
4415 len = skb_transport_header(skb) - skb_network_header(skb);
4416 if (len & ~(127 * 4))
4417 goto out_err;
4418
4419 if (skb->encapsulation) {
4420 /* L4TUNLEN can support 127 words */
4421 len = skb_inner_network_header(skb) - skb_transport_header(skb);
4422 if (len & ~(127 * 2))
4423 goto out_err;
4424
4425 /* IPLEN can support at most 127 dwords */
4426 len = skb_inner_transport_header(skb) -
4427 skb_inner_network_header(skb);
4428 if (len & ~(127 * 4))
4429 goto out_err;
4430 }
4431
4432 /* No need to validate L4LEN as TCP is the only protocol with a
4433 * flexible value and we support all possible values supported
4434 * by TCP, which is at most 15 dwords
4435 */
4436
4437 return features;
4438 out_err:
4439 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4440 }
4441
4442 /**
4443 * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off
4444 * @adapter: board private structure
4445 *
4446  * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4447  * was negotiated, determine the VLAN features that can be toggled on and off.
4448 **/
4449 static netdev_features_t
4450 iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
4451 {
4452 netdev_features_t hw_features = 0;
4453
4454 if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4455 return hw_features;
4456
4457 /* Enable VLAN features if supported */
4458 if (VLAN_ALLOWED(adapter)) {
4459 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
4460 NETIF_F_HW_VLAN_CTAG_RX);
4461 } else if (VLAN_V2_ALLOWED(adapter)) {
4462 struct virtchnl_vlan_caps *vlan_v2_caps =
4463 &adapter->vlan_v2_caps;
4464 struct virtchnl_vlan_supported_caps *stripping_support =
4465 &vlan_v2_caps->offloads.stripping_support;
4466 struct virtchnl_vlan_supported_caps *insertion_support =
4467 &vlan_v2_caps->offloads.insertion_support;
4468
4469 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4470 stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4471 if (stripping_support->outer &
4472 VIRTCHNL_VLAN_ETHERTYPE_8100)
4473 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4474 if (stripping_support->outer &
4475 VIRTCHNL_VLAN_ETHERTYPE_88A8)
4476 hw_features |= NETIF_F_HW_VLAN_STAG_RX;
4477 } else if (stripping_support->inner !=
4478 VIRTCHNL_VLAN_UNSUPPORTED &&
4479 stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4480 if (stripping_support->inner &
4481 VIRTCHNL_VLAN_ETHERTYPE_8100)
4482 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4483 }
4484
4485 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4486 insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4487 if (insertion_support->outer &
4488 VIRTCHNL_VLAN_ETHERTYPE_8100)
4489 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4490 if (insertion_support->outer &
4491 VIRTCHNL_VLAN_ETHERTYPE_88A8)
4492 hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4493 } else if (insertion_support->inner &&
4494 insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4495 if (insertion_support->inner &
4496 VIRTCHNL_VLAN_ETHERTYPE_8100)
4497 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4498 }
4499 }
4500
4501 return hw_features;
4502 }
4503
4504 /**
4505  * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
4506 * @adapter: board private structure
4507 *
4508  * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4509  * was negotiated, determine the VLAN features that are enabled by default.
4510 **/
4511 static netdev_features_t
4512 iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
4513 {
4514 netdev_features_t features = 0;
4515
4516 if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4517 return features;
4518
4519 if (VLAN_ALLOWED(adapter)) {
4520 features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4521 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
4522 } else if (VLAN_V2_ALLOWED(adapter)) {
4523 struct virtchnl_vlan_caps *vlan_v2_caps =
4524 &adapter->vlan_v2_caps;
4525 struct virtchnl_vlan_supported_caps *filtering_support =
4526 &vlan_v2_caps->filtering.filtering_support;
4527 struct virtchnl_vlan_supported_caps *stripping_support =
4528 &vlan_v2_caps->offloads.stripping_support;
4529 struct virtchnl_vlan_supported_caps *insertion_support =
4530 &vlan_v2_caps->offloads.insertion_support;
4531 u32 ethertype_init;
4532
4533 /* give priority to outer stripping and don't support both outer
4534 * and inner stripping
4535 */
4536 ethertype_init = vlan_v2_caps->offloads.ethertype_init;
4537 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4538 if (stripping_support->outer &
4539 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4540 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4541 features |= NETIF_F_HW_VLAN_CTAG_RX;
4542 else if (stripping_support->outer &
4543 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4544 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4545 features |= NETIF_F_HW_VLAN_STAG_RX;
4546 } else if (stripping_support->inner !=
4547 VIRTCHNL_VLAN_UNSUPPORTED) {
4548 if (stripping_support->inner &
4549 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4550 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4551 features |= NETIF_F_HW_VLAN_CTAG_RX;
4552 }
4553
4554 /* give priority to outer insertion and don't support both outer
4555 * and inner insertion
4556 */
4557 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4558 if (insertion_support->outer &
4559 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4560 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4561 features |= NETIF_F_HW_VLAN_CTAG_TX;
4562 else if (insertion_support->outer &
4563 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4564 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4565 features |= NETIF_F_HW_VLAN_STAG_TX;
4566 } else if (insertion_support->inner !=
4567 VIRTCHNL_VLAN_UNSUPPORTED) {
4568 if (insertion_support->inner &
4569 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4570 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4571 features |= NETIF_F_HW_VLAN_CTAG_TX;
4572 }
4573
4574 /* give priority to outer filtering and don't bother if both
4575 * outer and inner filtering are enabled
4576 */
4577 ethertype_init = vlan_v2_caps->filtering.ethertype_init;
4578 if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4579 if (filtering_support->outer &
4580 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4581 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4582 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4583 if (filtering_support->outer &
4584 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4585 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4586 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4587 } else if (filtering_support->inner !=
4588 VIRTCHNL_VLAN_UNSUPPORTED) {
4589 if (filtering_support->inner &
4590 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4591 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4592 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4593 if (filtering_support->inner &
4594 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4595 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4596 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4597 }
4598 }
4599
4600 return features;
4601 }
4602
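/* a requested VLAN feature is allowed unless it is requested but missing from the allowed set */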
4603 #define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
4604 (!(((requested) & (feature_bit)) && \
4605 !((allowed) & (feature_bit))))
4606
4607 /**
4608 * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
4609 * @adapter: board private structure
4610 * @requested_features: stack requested NETDEV features
4611 **/
4612 static netdev_features_t
4613 iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
4614 netdev_features_t requested_features)
4615 {
4616 netdev_features_t allowed_features;
4617
4618 allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
4619 iavf_get_netdev_vlan_features(adapter);
4620
4621 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4622 allowed_features,
4623 NETIF_F_HW_VLAN_CTAG_TX))
4624 requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4625
4626 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4627 allowed_features,
4628 NETIF_F_HW_VLAN_CTAG_RX))
4629 requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4630
4631 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4632 allowed_features,
4633 NETIF_F_HW_VLAN_STAG_TX))
4634 requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
4635 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4636 allowed_features,
4637 NETIF_F_HW_VLAN_STAG_RX))
4638 requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
4639
4640 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4641 allowed_features,
4642 NETIF_F_HW_VLAN_CTAG_FILTER))
4643 requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4644
4645 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4646 allowed_features,
4647 NETIF_F_HW_VLAN_STAG_FILTER))
4648 requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
4649
4650 if ((requested_features &
4651 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
4652 (requested_features &
4653 (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
4654 adapter->vlan_v2_caps.offloads.ethertype_match ==
4655 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
4656 netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
4657 requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
4658 NETIF_F_HW_VLAN_STAG_TX);
4659 }
4660
4661 return requested_features;
4662 }
4663
4664 /**
4665 * iavf_fix_features - fix up the netdev feature bits
4666 * @netdev: our net device
4667 * @features: desired feature bits
4668 *
4669 * Returns fixed-up features bits
4670 **/
4671 static netdev_features_t iavf_fix_features(struct net_device *netdev,
4672 netdev_features_t features)
4673 {
4674 struct iavf_adapter *adapter = netdev_priv(netdev);
4675
4676 return iavf_fix_netdev_vlan_features(adapter, features);
4677 }
4678
4679 static const struct net_device_ops iavf_netdev_ops = {
4680 .ndo_open = iavf_open,
4681 .ndo_stop = iavf_close,
4682 .ndo_start_xmit = iavf_xmit_frame,
4683 .ndo_set_rx_mode = iavf_set_rx_mode,
4684 .ndo_validate_addr = eth_validate_addr,
4685 .ndo_set_mac_address = iavf_set_mac,
4686 .ndo_change_mtu = iavf_change_mtu,
4687 .ndo_tx_timeout = iavf_tx_timeout,
4688 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid,
4689 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid,
4690 .ndo_features_check = iavf_features_check,
4691 .ndo_fix_features = iavf_fix_features,
4692 .ndo_set_features = iavf_set_features,
4693 .ndo_setup_tc = iavf_setup_tc,
4694 };
4695
4696 /**
4697 * iavf_check_reset_complete - check that VF reset is complete
4698 * @hw: pointer to hw struct
4699 *
4700 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
4701 **/
4702 static int iavf_check_reset_complete(struct iavf_hw *hw)
4703 {
4704 u32 rstat;
4705 int i;
4706
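/* poll the VF reset status until the PF reports the VF active or reset complete */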
4707 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
4708 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
4709 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
4710 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
4711 (rstat == VIRTCHNL_VFR_COMPLETED))
4712 return 0;
4713 usleep_range(10, 20);
4714 }
4715 return -EBUSY;
4716 }
4717
4718 /**
4719 * iavf_process_config - Process the config information we got from the PF
4720 * @adapter: board private structure
4721 *
4722 * Verify that we have a valid config struct, and set up our netdev features
4723 * and our VSI struct.
4724 **/
4725 int iavf_process_config(struct iavf_adapter *adapter)
4726 {
4727 struct virtchnl_vf_resource *vfres = adapter->vf_res;
4728 netdev_features_t hw_vlan_features, vlan_features;
4729 struct net_device *netdev = adapter->netdev;
4730 netdev_features_t hw_enc_features;
4731 netdev_features_t hw_features;
4732
4733 hw_enc_features = NETIF_F_SG |
4734 NETIF_F_IP_CSUM |
4735 NETIF_F_IPV6_CSUM |
4736 NETIF_F_HIGHDMA |
4737 NETIF_F_SOFT_FEATURES |
4738 NETIF_F_TSO |
4739 NETIF_F_TSO_ECN |
4740 NETIF_F_TSO6 |
4741 NETIF_F_SCTP_CRC |
4742 NETIF_F_RXHASH |
4743 NETIF_F_RXCSUM |
4744 0;
4745
4746 /* advertise to stack only if offloads for encapsulated packets are
4747 * supported
4748 */
4749 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
4750 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4751 NETIF_F_GSO_GRE |
4752 NETIF_F_GSO_GRE_CSUM |
4753 NETIF_F_GSO_IPXIP4 |
4754 NETIF_F_GSO_IPXIP6 |
4755 NETIF_F_GSO_UDP_TUNNEL_CSUM |
4756 NETIF_F_GSO_PARTIAL |
4757 0;
4758
4759 if (!(vfres->vf_cap_flags &
4760 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
4761 netdev->gso_partial_features |=
4762 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4763
4764 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
4765 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
4766 netdev->hw_enc_features |= hw_enc_features;
4767 }
4768 /* record features VLANs can make use of */
4769 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
4770
4771 /* Write features and hw_features separately to avoid polluting
4772 * with, or dropping, features that are set when we registered.
4773 */
4774 hw_features = hw_enc_features;
4775
4776 /* get HW VLAN features that can be toggled */
4777 hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
4778
4779 /* Enable cloud filter if ADQ is supported */
4780 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
4781 hw_features |= NETIF_F_HW_TC;
4782 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
4783 hw_features |= NETIF_F_GSO_UDP_L4;
4784
4785 netdev->hw_features |= hw_features | hw_vlan_features;
4786 vlan_features = iavf_get_netdev_vlan_features(adapter);
4787
4788 netdev->features |= hw_features | vlan_features;
4789
4790 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
4791 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4792
4793 netdev->priv_flags |= IFF_UNICAST_FLT;
4794
4795 /* Do not turn on offloads when they are requested to be turned off.
4796  * TSO needs a minimum of 576 bytes to work correctly.
4797 */
4798 if (netdev->wanted_features) {
4799 if (!(netdev->wanted_features & NETIF_F_TSO) ||
4800 netdev->mtu < 576)
4801 netdev->features &= ~NETIF_F_TSO;
4802 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
4803 netdev->mtu < 576)
4804 netdev->features &= ~NETIF_F_TSO6;
4805 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
4806 netdev->features &= ~NETIF_F_TSO_ECN;
4807 if (!(netdev->wanted_features & NETIF_F_GRO))
4808 netdev->features &= ~NETIF_F_GRO;
4809 if (!(netdev->wanted_features & NETIF_F_GSO))
4810 netdev->features &= ~NETIF_F_GSO;
4811 }
4812
4813 return 0;
4814 }
4815
4816 /**
4817 * iavf_shutdown - Shutdown the device in preparation for a reboot
4818 * @pdev: pci device structure
4819 **/
4820 static void iavf_shutdown(struct pci_dev *pdev)
4821 {
4822 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4823 struct net_device *netdev = adapter->netdev;
4824
4825 netif_device_detach(netdev);
4826
4827 if (netif_running(netdev))
4828 iavf_close(netdev);
4829
4830 if (iavf_lock_timeout(&adapter->crit_lock, 5000))
4831 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
4832 /* Prevent the watchdog from running. */
4833 iavf_change_state(adapter, __IAVF_REMOVE);
4834 adapter->aq_required = 0;
4835 mutex_unlock(&adapter->crit_lock);
4836
4837 #ifdef CONFIG_PM
4838 pci_save_state(pdev);
4839
4840 #endif
4841 pci_disable_device(pdev);
4842 }
4843
4844 /**
4845 * iavf_probe - Device Initialization Routine
4846 * @pdev: PCI device information struct
4847 * @ent: entry in iavf_pci_tbl
4848 *
4849 * Returns 0 on success, negative on failure
4850 *
4851 * iavf_probe initializes an adapter identified by a pci_dev structure.
4852 * The OS initialization, configuring of the adapter private structure,
4853 * and a hardware reset occur.
4854 **/
4855 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4856 {
4857 struct net_device *netdev;
4858 struct iavf_adapter *adapter = NULL;
4859 struct iavf_hw *hw = NULL;
4860 int err;
4861
4862 err = pci_enable_device(pdev);
4863 if (err)
4864 return err;
4865
4866 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4867 if (err) {
4868 dev_err(&pdev->dev,
4869 "DMA configuration failed: 0x%x\n", err);
4870 goto err_dma;
4871 }
4872
4873 err = pci_request_regions(pdev, iavf_driver_name);
4874 if (err) {
4875 dev_err(&pdev->dev,
4876 "pci_request_regions failed 0x%x\n", err);
4877 goto err_pci_reg;
4878 }
4879
4880 pci_enable_pcie_error_reporting(pdev);
4881
4882 pci_set_master(pdev);
4883
4884 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
4885 IAVF_MAX_REQ_QUEUES);
4886 if (!netdev) {
4887 err = -ENOMEM;
4888 goto err_alloc_etherdev;
4889 }
4890
4891 SET_NETDEV_DEV(netdev, &pdev->dev);
4892
4893 pci_set_drvdata(pdev, netdev);
4894 adapter = netdev_priv(netdev);
4895
4896 adapter->netdev = netdev;
4897 adapter->pdev = pdev;
4898
4899 hw = &adapter->hw;
4900 hw->back = adapter;
4901
4902 adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4903 iavf_driver_name);
4904 if (!adapter->wq) {
4905 err = -ENOMEM;
4906 goto err_alloc_wq;
4907 }
4908
4909 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
4910 iavf_change_state(adapter, __IAVF_STARTUP);
4911
4912 /* Call save state here because it relies on the adapter struct. */
4913 pci_save_state(pdev);
4914
4915 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4916 pci_resource_len(pdev, 0));
4917 if (!hw->hw_addr) {
4918 err = -EIO;
4919 goto err_ioremap;
4920 }
4921 hw->vendor_id = pdev->vendor;
4922 hw->device_id = pdev->device;
4923 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4924 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4925 hw->subsystem_device_id = pdev->subsystem_device;
4926 hw->bus.device = PCI_SLOT(pdev->devfn);
4927 hw->bus.func = PCI_FUNC(pdev->devfn);
4928 hw->bus.bus_id = pdev->bus->number;
4929
4930 /* set up the locks for the AQ, do this only once in probe
4931 * and destroy them only once in remove
4932 */
4933 mutex_init(&adapter->crit_lock);
4934 mutex_init(&adapter->client_lock);
4935 mutex_init(&hw->aq.asq_mutex);
4936 mutex_init(&hw->aq.arq_mutex);
4937
4938 spin_lock_init(&adapter->mac_vlan_list_lock);
4939 spin_lock_init(&adapter->cloud_filter_list_lock);
4940 spin_lock_init(&adapter->fdir_fltr_lock);
4941 spin_lock_init(&adapter->adv_rss_lock);
4942
4943 INIT_LIST_HEAD(&adapter->mac_filter_list);
4944 INIT_LIST_HEAD(&adapter->vlan_filter_list);
4945 INIT_LIST_HEAD(&adapter->cloud_filter_list);
4946 INIT_LIST_HEAD(&adapter->fdir_list_head);
4947 INIT_LIST_HEAD(&adapter->adv_rss_list_head);
4948
4949 INIT_WORK(&adapter->reset_task, iavf_reset_task);
4950 INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
4951 INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
4952 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
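/* delay the first watchdog run by 5 ms times the low bits of devfn to stagger VFs */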
4953 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
4954 msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
4955
4956 /* Setup the wait queue for indicating transition to down status */
4957 init_waitqueue_head(&adapter->down_waitqueue);
4958
4959 /* Setup the wait queue for indicating virtchannel events */
4960 init_waitqueue_head(&adapter->vc_waitqueue);
4961
4962 return 0;
4963
4964 err_ioremap:
4965 destroy_workqueue(adapter->wq);
4966 err_alloc_wq:
4967 free_netdev(netdev);
4968 err_alloc_etherdev:
4969 pci_disable_pcie_error_reporting(pdev);
4970 pci_release_regions(pdev);
4971 err_pci_reg:
4972 err_dma:
4973 pci_disable_device(pdev);
4974 return err;
4975 }
4976
4977 /**
4978 * iavf_suspend - Power management suspend routine
4979 * @dev_d: device info pointer
4980 *
4981 * Called when the system (VM) is entering sleep/suspend.
4982 **/
4983 static int __maybe_unused iavf_suspend(struct device *dev_d)
4984 {
4985 struct net_device *netdev = dev_get_drvdata(dev_d);
4986 struct iavf_adapter *adapter = netdev_priv(netdev);
4987
4988 netif_device_detach(netdev);
4989
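/* wait for any task holding the critical lock before tearing down interrupts */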
4990 while (!mutex_trylock(&adapter->crit_lock))
4991 usleep_range(500, 1000);
4992
4993 if (netif_running(netdev)) {
4994 rtnl_lock();
4995 iavf_down(adapter);
4996 rtnl_unlock();
4997 }
4998 iavf_free_misc_irq(adapter);
4999 iavf_reset_interrupt_capability(adapter);
5000
5001 mutex_unlock(&adapter->crit_lock);
5002
5003 return 0;
5004 }
5005
5006 /**
5007 * iavf_resume - Power management resume routine
5008 * @dev_d: device info pointer
5009 *
5010 * Called when the system (VM) is resumed from sleep/suspend.
5011 **/
5012 static int __maybe_unused iavf_resume(struct device *dev_d)
5013 {
5014 struct pci_dev *pdev = to_pci_dev(dev_d);
5015 struct iavf_adapter *adapter;
5016 u32 err;
5017
5018 adapter = iavf_pdev_to_adapter(pdev);
5019
5020 pci_set_master(pdev);
5021
5022 rtnl_lock();
5023 err = iavf_set_interrupt_capability(adapter);
5024 if (err) {
5025 rtnl_unlock();
5026 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
5027 return err;
5028 }
5029 err = iavf_request_misc_irq(adapter);
5030 rtnl_unlock();
5031 if (err) {
5032 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
5033 return err;
5034 }
5035
5036 queue_work(adapter->wq, &adapter->reset_task);
5037
5038 netif_device_attach(adapter->netdev);
5039
5040 return err;
5041 }
5042
5043 /**
5044 * iavf_remove - Device Removal Routine
5045 * @pdev: PCI device information struct
5046 *
5047 * iavf_remove is called by the PCI subsystem to alert the driver
5048  * that it should release a PCI device. This could be caused by a
5049 * Hot-Plug event, or because the driver is going to be removed from
5050 * memory.
5051 **/
5052 static void iavf_remove(struct pci_dev *pdev)
5053 {
5054 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
5055 struct iavf_fdir_fltr *fdir, *fdirtmp;
5056 struct iavf_vlan_filter *vlf, *vlftmp;
5057 struct iavf_cloud_filter *cf, *cftmp;
5058 struct iavf_adv_rss *rss, *rsstmp;
5059 struct iavf_mac_filter *f, *ftmp;
5060 struct net_device *netdev;
5061 struct iavf_hw *hw;
5062 int err;
5063
5064 netdev = adapter->netdev;
5065 hw = &adapter->hw;
5066
5067 if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
5068 return;
5069
5070 /* Wait until port initialization is complete.
5071 * There are flows where register/unregister netdev may race.
5072 */
5073 while (1) {
5074 mutex_lock(&adapter->crit_lock);
5075 if (adapter->state == __IAVF_RUNNING ||
5076 adapter->state == __IAVF_DOWN ||
5077 adapter->state == __IAVF_INIT_FAILED) {
5078 mutex_unlock(&adapter->crit_lock);
5079 break;
5080 }
5081
5082 mutex_unlock(&adapter->crit_lock);
5083 usleep_range(500, 1000);
5084 }
5085 cancel_delayed_work_sync(&adapter->watchdog_task);
5086
5087 if (adapter->netdev_registered) {
5088 rtnl_lock();
5089 unregister_netdevice(netdev);
5090 adapter->netdev_registered = false;
5091 rtnl_unlock();
5092 }
5093 if (CLIENT_ALLOWED(adapter)) {
5094 err = iavf_lan_del_device(adapter);
5095 if (err)
5096 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
5097 err);
5098 }
5099
5100 mutex_lock(&adapter->crit_lock);
5101 dev_info(&adapter->pdev->dev, "Remove device\n");
5102 iavf_change_state(adapter, __IAVF_REMOVE);
5103
5104 iavf_request_reset(adapter);
5105 msleep(50);
5106 /* If the FW isn't responding, kick it once, but only once. */
5107 if (!iavf_asq_done(hw)) {
5108 iavf_request_reset(adapter);
5109 msleep(50);
5110 }
5111
5112 iavf_misc_irq_disable(adapter);
5113 /* Shut down all the garbage mashers on the detention level */
5114 cancel_work_sync(&adapter->reset_task);
5115 cancel_delayed_work_sync(&adapter->watchdog_task);
5116 cancel_work_sync(&adapter->adminq_task);
5117 cancel_delayed_work_sync(&adapter->client_task);
5118
5119 adapter->aq_required = 0;
5120 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
5121
5122 iavf_free_all_tx_resources(adapter);
5123 iavf_free_all_rx_resources(adapter);
5124 iavf_free_misc_irq(adapter);
5125
5126 iavf_reset_interrupt_capability(adapter);
5127 iavf_free_q_vectors(adapter);
5128
5129 iavf_free_rss(adapter);
5130
5131 if (hw->aq.asq.count)
5132 iavf_shutdown_adminq(hw);
5133
5134 /* destroy the locks only once, here */
5135 mutex_destroy(&hw->aq.arq_mutex);
5136 mutex_destroy(&hw->aq.asq_mutex);
5137 mutex_destroy(&adapter->client_lock);
5138 mutex_unlock(&adapter->crit_lock);
5139 mutex_destroy(&adapter->crit_lock);
5140
5141 iounmap(hw->hw_addr);
5142 pci_release_regions(pdev);
5143 iavf_free_queues(adapter);
5144 kfree(adapter->vf_res);
5145 spin_lock_bh(&adapter->mac_vlan_list_lock);
5146 /* If we got removed before an up/down sequence, we've got a filter
5147 * hanging out there that we need to get rid of.
5148 */
5149 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
5150 list_del(&f->list);
5151 kfree(f);
5152 }
5153 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
5154 list) {
5155 list_del(&vlf->list);
5156 kfree(vlf);
5157 }
5158
5159 spin_unlock_bh(&adapter->mac_vlan_list_lock);
5160
5161 spin_lock_bh(&adapter->cloud_filter_list_lock);
5162 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
5163 list_del(&cf->list);
5164 kfree(cf);
5165 }
5166 spin_unlock_bh(&adapter->cloud_filter_list_lock);
5167
5168 spin_lock_bh(&adapter->fdir_fltr_lock);
5169 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
5170 list_del(&fdir->list);
5171 kfree(fdir);
5172 }
5173 spin_unlock_bh(&adapter->fdir_fltr_lock);
5174
5175 spin_lock_bh(&adapter->adv_rss_lock);
5176 list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
5177 list) {
5178 list_del(&rss->list);
5179 kfree(rss);
5180 }
5181 spin_unlock_bh(&adapter->adv_rss_lock);
5182
5183 destroy_workqueue(adapter->wq);
5184
5185 free_netdev(netdev);
5186
5187 pci_disable_pcie_error_reporting(pdev);
5188
5189 pci_disable_device(pdev);
5190 }
5191
5192 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
5193
5194 static struct pci_driver iavf_driver = {
5195 .name = iavf_driver_name,
5196 .id_table = iavf_pci_tbl,
5197 .probe = iavf_probe,
5198 .remove = iavf_remove,
5199 .driver.pm = &iavf_pm_ops,
5200 .shutdown = iavf_shutdown,
5201 };
5202
5203 /**
5204 * iavf_init_module - Driver Registration Routine
5205 *
5206 * iavf_init_module is the first routine called when the driver is
5207 * loaded. All it does is register with the PCI subsystem.
5208 **/
5209 static int __init iavf_init_module(void)
5210 {
5211 pr_info("iavf: %s\n", iavf_driver_string);
5212
5213 pr_info("%s\n", iavf_copyright);
5214
5215 return pci_register_driver(&iavf_driver);
5216 }
5217
5218 module_init(iavf_init_module);
5219
5220 /**
5221 * iavf_exit_module - Driver Exit Cleanup Routine
5222 *
5223 * iavf_exit_module is called just before the driver is removed
5224 * from memory.
5225 **/
5226 static void __exit iavf_exit_module(void)
5227 {
5228 pci_unregister_driver(&iavf_driver);
5229 }
5230
5231 module_exit(iavf_exit_module);
5232
5233 /* iavf_main.c */
5234