// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

#include "fm10k.h"
#include <linux/vmalloc.h>
#include <net/udp_tunnel.h>
#include <linux/if_macvlan.h>

/**
 * fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;

	tx_ring->tx_buffer = vzalloc(size);
	if (!tx_ring->tx_buffer)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer);
	tx_ring->tx_buffer = NULL;
	return -ENOMEM;
}

/**
 * fm10k_setup_all_tx_resources - allocate all queues Tx resources
 * @interface: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean up those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface)
{
	int i, err;

	for (i = 0; i < interface->num_tx_queues; i++) {
		err = fm10k_setup_tx_resources(interface->tx_ring[i]);
		if (!err)
			continue;

		netif_err(interface, probe, interface->netdev,
			  "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	return 0;
err_setup_tx:
	/* rewind the index, freeing the rings as we go */
	while (i--)
		fm10k_free_tx_resources(interface->tx_ring[i]);
	return err;
}

/**
 * fm10k_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;

	rx_ring->rx_buffer = vzalloc(size);
	if (!rx_ring->rx_buffer)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer);
	rx_ring->rx_buffer = NULL;
	return -ENOMEM;
}

/**
 * fm10k_setup_all_rx_resources - allocate all queues Rx resources
 * @interface: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean up those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface)
{
	int i, err;

	for (i = 0; i < interface->num_rx_queues; i++) {
		err = fm10k_setup_rx_resources(interface->rx_ring[i]);
		if (!err)
			continue;

		netif_err(interface, probe, interface->netdev,
			  "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}

	return 0;
err_setup_rx:
	/* rewind the index, freeing the rings as we go */
	while (i--)
		fm10k_free_rx_resources(interface->rx_ring[i]);
	return err;
}

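/**
 * fm10k_unmap_and_free_tx_resource - Release skb and DMA mapping for a buffer
 * @ring: ring the buffer belongs to
 * @tx_buffer: buffer to free
 *
 * Free the skb (if any), unmap the associated DMA region, and reset the
 * buffer so it can be reused by the transmit path.
 **/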
void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring,
				      struct fm10k_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * fm10k_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
{
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buffer)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		struct fm10k_tx_buffer *tx_buffer = &tx_ring->tx_buffer[i];

		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
	}

	/* reset BQL values */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
}

/**
 * fm10k_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void fm10k_free_tx_resources(struct fm10k_ring *tx_ring)
{
	fm10k_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer);
	tx_ring->tx_buffer = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * fm10k_clean_all_tx_rings - Free Tx Buffers for all queues
 * @interface: board private structure
 **/
void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_clean_tx_ring(interface->tx_ring[i]);
}

/**
 * fm10k_free_all_tx_resources - Free Tx Resources for All Queues
 * @interface: board private structure
 *
 * Free all transmit software resources
 **/
static void fm10k_free_all_tx_resources(struct fm10k_intfc *interface)
{
	int i = interface->num_tx_queues;

	while (i--)
		fm10k_free_tx_resources(interface->tx_ring[i]);
}

/**
 * fm10k_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer)
		return;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct fm10k_rx_buffer *buffer = &rx_ring->rx_buffer[i];
		/* clean-up will only set page pointer to NULL */
		if (!buffer->page)
			continue;

		dma_unmap_page(rx_ring->dev, buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buffer->page);

		buffer->page = NULL;
	}

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * fm10k_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void fm10k_free_rx_resources(struct fm10k_ring *rx_ring)
{
	fm10k_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer);
	rx_ring->rx_buffer = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * fm10k_clean_all_rx_rings - Free Rx Buffers for all queues
 * @interface: board private structure
 **/
void fm10k_clean_all_rx_rings(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_clean_rx_ring(interface->rx_ring[i]);
}

/**
 * fm10k_free_all_rx_resources - Free Rx Resources for All Queues
 * @interface: board private structure
 *
 * Free all receive software resources
 **/
static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface)
{
	int i = interface->num_rx_queues;

	while (i--)
		fm10k_free_rx_resources(interface->rx_ring[i]);
}

/**
 * fm10k_request_glort_range - Request GLORTs for use in configuring rules
 * @interface: board private structure
 *
 * This function allocates a range of glorts for this interface to use.
 **/
static void fm10k_request_glort_range(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 mask = (~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT;

	/* establish GLORT base */
	interface->glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	interface->glort_count = 0;

	/* nothing we can do until mask is allocated */
	if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
		return;

	/* We support 3 possible GLORT configurations.
	 * 1: VFs consume all but the last one
	 * 2: VFs and PF split glorts with possible gap between
	 * 3: VFs allocated first 64, all others belong to PF
	 */
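	/* For example (illustrative values): a dglort_map mask of 0xFF with
	 * 8 total VFs selects case 3 below, so the VFs own the first 64
	 * GLORTs and the PF claims the remaining 0xFF - 63 = 192.
	 */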
	if (mask <= hw->iov.total_vfs) {
		interface->glort_count = 1;
		interface->glort += mask;
	} else if (mask < 64) {
		interface->glort_count = (mask + 1) / 2;
		interface->glort += interface->glort_count;
	} else {
		interface->glort_count = mask - 63;
		interface->glort += 64;
	}
}

/**
 * fm10k_restore_udp_port_info - restore UDP tunnel port configuration
 * @interface: board private structure
 *
 * This function restores the value in the tunnel_cfg register(s) after reset
 **/
static void fm10k_restore_udp_port_info(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	/* only the PF supports configuring tunnels */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	/* restore tunnel configuration register */
	fm10k_write_reg(hw, FM10K_TUNNEL_CFG,
			ntohs(interface->vxlan_port) |
			(ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT));

	/* restore Geneve tunnel configuration register */
	fm10k_write_reg(hw, FM10K_TUNNEL_CFG_GENEVE,
			ntohs(interface->geneve_port));
}

/**
 * fm10k_udp_tunnel_sync - Called when UDP tunnel ports change
 * @dev: network interface device structure
 * @table: Tunnel table (according to tables of @fm10k_udp_tunnels)
 *
 * This function is called when a new UDP tunnel port is added or deleted.
 * Due to hardware restrictions, only one port per type can be offloaded at
 * once. The core will send the driver a port of its choice.
 **/
static int fm10k_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct udp_tunnel_info ti;

	udp_tunnel_nic_get_port(dev, table, 0, &ti);
	if (!table)
		interface->vxlan_port = ti.port;
	else
		interface->geneve_port = ti.port;

	fm10k_restore_udp_port_info(interface);
	return 0;
}

static const struct udp_tunnel_nic_info fm10k_udp_tunnels = {
	.sync_table	= fm10k_udp_tunnel_sync,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};

/**
 * fm10k_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
int fm10k_open(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = fm10k_setup_all_tx_resources(interface);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = fm10k_setup_all_rx_resources(interface);
	if (err)
		goto err_setup_rx;

	/* allocate interrupt resources */
	err = fm10k_qv_request_irq(interface);
	if (err)
		goto err_req_irq;

	/* setup GLORT assignment for this port */
	fm10k_request_glort_range(interface);

	/* Notify the stack of the actual queue counts */
	err = netif_set_real_num_tx_queues(netdev,
					   interface->num_tx_queues);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(netdev,
					   interface->num_rx_queues);
	if (err)
		goto err_set_queues;

	fm10k_up(interface);

	return 0;

err_set_queues:
	fm10k_qv_free_irq(interface);
err_req_irq:
	fm10k_free_all_rx_resources(interface);
err_setup_rx:
	fm10k_free_all_tx_resources(interface);
err_setup_tx:
	return err;
}

/**
 * fm10k_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int fm10k_close(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	fm10k_down(interface);

	fm10k_qv_free_irq(interface);

	fm10k_free_all_tx_resources(interface);
	fm10k_free_all_rx_resources(interface);

	return 0;
}

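/**
 * fm10k_xmit_frame - Transmit entry point (ndo_start_xmit)
 * @skb: buffer to transmit
 * @dev: network interface device structure
 *
 * Promote any in-band 802.1Q tag to the hardware-accelerated tag, pad runt
 * frames up to the 17-byte hardware minimum, then hand the skb to the Tx
 * ring selected by its queue mapping.
 **/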
static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int num_tx_queues = READ_ONCE(interface->num_tx_queues);
	unsigned int r_idx = skb->queue_mapping;
	int err;

	if (!num_tx_queues)
		return NETDEV_TX_BUSY;

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    !skb_vlan_tag_present(skb)) {
		/* FM10K only supports hardware tagging, any tags in frame
		 * are considered 2nd level or "outer" tags
		 */
		struct vlan_hdr *vhdr;
		__be16 proto;

		/* make sure skb is not shared */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NETDEV_TX_OK;

		/* make sure there is enough room to move the ethernet header */
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return NETDEV_TX_OK;

		/* verify the skb head is not shared */
		err = skb_cow_head(skb, 0);
		if (err) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		/* locate VLAN header */
		vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);

		/* pull the 2 key pieces of data out of it */
		__vlan_hwaccel_put_tag(skb,
				       htons(ETH_P_8021Q),
				       ntohs(vhdr->h_vlan_TCI));
		proto = vhdr->h_vlan_encapsulated_proto;
		skb->protocol = (ntohs(proto) >= 1536) ? proto :
						    htons(ETH_P_802_2);

		/* squash it by moving the ethernet addresses up 4 bytes */
		memmove(skb->data + VLAN_HLEN, skb->data, 12);
		__skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}

	/* The minimum packet size for a single buffer is 17B so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (unlikely(skb->len < 17)) {
		int pad_len = 17 - skb->len;

		if (skb_pad(skb, pad_len))
			return NETDEV_TX_OK;
		__skb_put(skb, pad_len);
	}

	if (r_idx >= num_tx_queues)
		r_idx %= num_tx_queues;

	err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]);

	return err;
}

/**
 * fm10k_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: the index of the Tx queue that timed out
 **/
static void fm10k_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *tx_ring;
	bool real_tx_hang = false;

	if (txqueue >= interface->num_tx_queues) {
		WARN(1, "invalid Tx queue index %u", txqueue);
		return;
	}

	tx_ring = interface->tx_ring[txqueue];
	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
		real_tx_hang = true;

#define TX_TIMEO_LIMIT 16000
	if (real_tx_hang) {
		fm10k_tx_timeout_reset(interface);
	} else {
		netif_info(interface, drv, netdev,
			   "Fake Tx hang detected with timeout of %d seconds\n",
			   netdev->watchdog_timeo / HZ);

		/* fake Tx hang - increase the kernel timeout */
		if (netdev->watchdog_timeo < TX_TIMEO_LIMIT)
			netdev->watchdog_timeo *= 2;
	}
}

/**
 * fm10k_host_mbx_ready - Check PF interface's mailbox readiness
 * @interface: board private structure
 *
 * This function checks if the PF interface's mailbox is ready before queueing
 * mailbox messages for transmission. This will prevent filling the TX mailbox
 * queue when the receiver is not ready. VF interfaces are exempt from this
 * check since the check would block all PF-VF mailbox messages from being
 * sent from the VF to the PF at initialization.
 **/
static bool fm10k_host_mbx_ready(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	return (hw->mac.type == fm10k_mac_vf || interface->host_ready);
}

/**
 * fm10k_queue_vlan_request - Queue a VLAN update request
 * @interface: the fm10k interface structure
 * @vid: the VLAN vid
 * @vsi: VSI index number
 * @set: whether to set or clear
 *
 * This function queues up a VLAN update. For VFs, this must be sent to the
 * managing PF over the mailbox. For PFs, we'll use the same handling so that
 * it's similar to the VF. This avoids storming the PF<->VF mailbox with too
 * many VLAN updates during reset.
 **/
int fm10k_queue_vlan_request(struct fm10k_intfc *interface,
			     u32 vid, u8 vsi, bool set)
{
	struct fm10k_macvlan_request *request;
	unsigned long flags;

	/* This must be atomic since we may be called while the netdev
	 * addr_list_lock is held
	 */
	request = kzalloc(sizeof(*request), GFP_ATOMIC);
	if (!request)
		return -ENOMEM;

	request->type = FM10K_VLAN_REQUEST;
	request->vlan.vid = vid;
	request->vlan.vsi = vsi;
	request->set = set;

	spin_lock_irqsave(&interface->macvlan_lock, flags);
	list_add_tail(&request->list, &interface->macvlan_requests);
	spin_unlock_irqrestore(&interface->macvlan_lock, flags);

	fm10k_macvlan_schedule(interface);

	return 0;
}

/**
 * fm10k_queue_mac_request - Queue a MAC update request
 * @interface: the fm10k interface structure
 * @glort: the target glort for this update
 * @addr: the address to update
 * @vid: the vid to update
 * @set: whether to add or remove
 *
 * This function queues up a MAC request for sending to the switch manager.
 * A separate thread monitors the queue and sends updates to the switch
 * manager. Return 0 on success, and negative error code on failure.
 **/
int fm10k_queue_mac_request(struct fm10k_intfc *interface, u16 glort,
			    const unsigned char *addr, u16 vid, bool set)
{
	struct fm10k_macvlan_request *request;
	unsigned long flags;

	/* This must be atomic since we may be called while the netdev
	 * addr_list_lock is held
	 */
	request = kzalloc(sizeof(*request), GFP_ATOMIC);
	if (!request)
		return -ENOMEM;

	if (is_multicast_ether_addr(addr))
		request->type = FM10K_MC_MAC_REQUEST;
	else
		request->type = FM10K_UC_MAC_REQUEST;

	ether_addr_copy(request->mac.addr, addr);
	request->mac.glort = glort;
	request->mac.vid = vid;
	request->set = set;

	spin_lock_irqsave(&interface->macvlan_lock, flags);
	list_add_tail(&request->list, &interface->macvlan_requests);
	spin_unlock_irqrestore(&interface->macvlan_lock, flags);

	fm10k_macvlan_schedule(interface);

	return 0;
}

/**
 * fm10k_clear_macvlan_queue - Cancel pending updates for a given glort
 * @interface: the fm10k interface structure
 * @glort: the target glort to clear
 * @vlans: true to clear VLAN messages, false to ignore them
 *
 * Cancel any outstanding MAC/VLAN requests for a given glort. This is
 * expected to be called when a logical port goes down.
 **/
void fm10k_clear_macvlan_queue(struct fm10k_intfc *interface,
			       u16 glort, bool vlans)
{
	struct fm10k_macvlan_request *r, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&interface->macvlan_lock, flags);

	/* Free any outstanding MAC/VLAN requests for this interface */
	list_for_each_entry_safe(r, tmp, &interface->macvlan_requests, list) {
		switch (r->type) {
		case FM10K_MC_MAC_REQUEST:
		case FM10K_UC_MAC_REQUEST:
			/* Don't free requests for other interfaces */
			if (r->mac.glort != glort)
				break;
			fallthrough;
		case FM10K_VLAN_REQUEST:
			if (vlans) {
				list_del(&r->list);
				kfree(r);
			}
			break;
		}
	}

	spin_unlock_irqrestore(&interface->macvlan_lock, flags);
}

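/* Helpers passed to __dev_uc_unsync()/__dev_mc_unsync() by
 * fm10k_update_vid(). They queue a MAC update for the VLAN encoded in
 * interface->vid and return non-zero so only a partial unsync is recorded.
 */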
static int fm10k_uc_vlan_unsync(struct net_device *netdev,
				const unsigned char *uc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	u16 glort = interface->glort;
	u16 vid = interface->vid;
	bool set = !!(vid / VLAN_N_VID);
	int err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = fm10k_queue_mac_request(interface, glort, uc_addr, vid, set);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}

static int fm10k_mc_vlan_unsync(struct net_device *netdev,
				const unsigned char *mc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	u16 glort = interface->glort;
	u16 vid = interface->vid;
	bool set = !!(vid / VLAN_N_VID);
	int err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = fm10k_queue_mac_request(interface, glort, mc_addr, vid, set);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}

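/**
 * fm10k_update_vid - Add or remove a VLAN from the interface
 * @netdev: network interface device structure
 * @vid: VLAN ID to update
 * @set: true to add the VLAN, false to remove it
 *
 * Update the active_vlans bitmask and the per-ring default VLAN state, then
 * queue the VLAN and MAC table requests needed to apply the change.
 **/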
static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_l2_accel *l2_accel = interface->l2_accel;
	struct fm10k_hw *hw = &interface->hw;
	u16 glort;
	s32 err;
	int i;

	/* updates do not apply to VLAN 0 */
	if (!vid)
		return 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	/* Verify that we have permission to add VLANs. If this is a request
	 * to remove a VLAN, we still want to allow the user to remove the
	 * VLAN device. In that case, we need to clear the bit in the
	 * active_vlans bitmask.
	 */
	if (set && hw->mac.vlan_override)
		return -EACCES;

	/* update active_vlans bitmask */
	set_bit(vid, interface->active_vlans);
	if (!set)
		clear_bit(vid, interface->active_vlans);

	/* disable the default VLAN ID on ring if we have an active VLAN */
	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *rx_ring = interface->rx_ring[i];
		u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1);

		if (test_bit(rx_vid, interface->active_vlans))
			rx_ring->vid |= FM10K_VLAN_CLEAR;
		else
			rx_ring->vid &= ~FM10K_VLAN_CLEAR;
	}

	/* If our VLAN has been overridden, there is no reason to send VLAN
	 * removal requests as they will be silently ignored.
	 */
	if (hw->mac.vlan_override)
		return 0;

	/* Do not remove default VLAN ID related entries from VLAN and MAC
	 * tables
	 */
	if (!set && vid == hw->mac.default_vid)
		return 0;

	/* Do not throw an error if the interface is down. We will sync once
	 * we come up
	 */
	if (test_bit(__FM10K_DOWN, interface->state))
		return 0;

	fm10k_mbx_lock(interface);

	/* only need to update the VLAN if not in promiscuous mode */
	if (!(netdev->flags & IFF_PROMISC)) {
		err = fm10k_queue_vlan_request(interface, vid, 0, set);
		if (err)
			goto err_out;
	}

	/* Update our base MAC address */
	err = fm10k_queue_mac_request(interface, interface->glort,
				      hw->mac.addr, vid, set);
	if (err)
		goto err_out;

	/* Update L2 accelerated macvlan addresses */
	if (l2_accel) {
		for (i = 0; i < l2_accel->size; i++) {
			struct net_device *sdev = l2_accel->macvlan[i];

			if (!sdev)
				continue;

			glort = l2_accel->dglort + 1 + i;

			fm10k_queue_mac_request(interface, glort,
						sdev->dev_addr,
						vid, set);
		}
	}

	/* set VLAN ID prior to syncing/unsyncing the VLAN */
	interface->vid = vid + (set ? VLAN_N_VID : 0);

	/* Update the unicast and multicast address list to add/drop VLAN */
	__dev_uc_unsync(netdev, fm10k_uc_vlan_unsync);
	__dev_mc_unsync(netdev, fm10k_mc_vlan_unsync);

err_out:
	fm10k_mbx_unlock(interface);

	return err;
}

static int fm10k_vlan_rx_add_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, true);
}

static int fm10k_vlan_rx_kill_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, false);
}

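/* Return the next active VLAN ID after @vid. The search is first capped at
 * the default VID, so the default VID is always visited even when it is not
 * set in the active_vlans bitmask.
 */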
static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 default_vid = hw->mac.default_vid;
	u16 vid_limit = vid < default_vid ? default_vid : VLAN_N_VID;

	vid = find_next_bit(interface->active_vlans, vid_limit, ++vid);

	return vid;
}

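/* Queue requests to clear every VLAN range that is not currently marked
 * active, using the multi-bit length encoding to batch adjacent IDs.
 */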
static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface)
{
	u32 vid, prev_vid;

	/* loop through and find any gaps in the table */
	for (vid = 0, prev_vid = 0;
	     prev_vid < VLAN_N_VID;
	     prev_vid = vid + 1, vid = fm10k_find_next_vlan(interface, vid)) {
		if (prev_vid == vid)
			continue;

		/* send request to clear multiple bits at a time */
		prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT;
		fm10k_queue_vlan_request(interface, prev_vid, 0, false);
	}
}

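/* Queue unicast MAC table updates for @addr on every active VLAN. Wrapped
 * below as the sync/unsync callback pair used with __dev_uc_sync().
 */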
static int __fm10k_uc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = fm10k_queue_mac_request(interface, glort,
					      addr, vid, sync);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_uc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, true);
}

static int fm10k_uc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, false);
}

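/**
 * fm10k_set_mac - Change the interface MAC address (ndo_set_mac_address)
 * @dev: network interface device structure
 * @p: pointer to a sockaddr holding the new address
 *
 * Sync the new address (and unsync the old one) over the mailbox when the
 * interface is up, then record the address in both the netdev and the hw
 * structure.
 **/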
static int fm10k_set_mac(struct net_device *dev, void *p)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	struct sockaddr *addr = p;
	s32 err = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (dev->flags & IFF_UP) {
		/* setting MAC address requires mailbox */
		fm10k_mbx_lock(interface);

		err = fm10k_uc_sync(dev, addr->sa_data);
		if (!err)
			fm10k_uc_unsync(dev, hw->mac.addr);

		fm10k_mbx_unlock(interface);
	}

	if (!err) {
		eth_hw_addr_set(dev, addr->sa_data);
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	}

	/* if we had a mailbox error suggest trying again */
	return err ? -EAGAIN : 0;
}

static int __fm10k_mc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_multicast_ether_addr(addr))
		return -EADDRNOTAVAIL;

	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = fm10k_queue_mac_request(interface, glort,
					      addr, vid, sync);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_mc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, true);
}

static int fm10k_mc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, false);
}

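/**
 * fm10k_set_rx_mode - Apply Rx filtering state (ndo_set_rx_mode)
 * @dev: network interface device structure
 *
 * Derive the xcast mode from the netdev flags, push it to the switch when
 * it changes, and synchronize the unicast and multicast address lists.
 **/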
static void fm10k_set_rx_mode(struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	int xcast_mode;

	/* no need to update the hardware if we are not running */
	if (!(dev->flags & IFF_UP))
		return;

	/* determine new mode based on flags */
	xcast_mode = (dev->flags & IFF_PROMISC) ? FM10K_XCAST_MODE_PROMISC :
		     (dev->flags & IFF_ALLMULTI) ? FM10K_XCAST_MODE_ALLMULTI :
		     (dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
		     FM10K_XCAST_MODE_MULTI : FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(interface);

	/* update xcast mode first, but only if it changed */
	if (interface->xcast_mode != xcast_mode) {
		/* update VLAN table when entering promiscuous mode */
		if (xcast_mode == FM10K_XCAST_MODE_PROMISC)
			fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL,
						 0, true);

		/* clear VLAN table when exiting promiscuous mode */
		if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
			fm10k_clear_unused_vlans(interface);

		/* update xcast mode if host's mailbox is ready */
		if (fm10k_host_mbx_ready(interface))
			hw->mac.ops.update_xcast_mode(hw, interface->glort,
						      xcast_mode);

		/* record updated xcast mode state */
		interface->xcast_mode = xcast_mode;
	}

	/* synchronize all of the addresses */
	__dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
	__dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);

	fm10k_mbx_unlock(interface);
}

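/**
 * fm10k_restore_rx_state - Re-sync Rx filtering state after a reset
 * @interface: board private structure
 *
 * Re-enable the logical port and replay the VLAN table, MAC addresses,
 * macvlan offload entries, xcast mode, and UDP tunnel ports to the switch.
 **/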
void fm10k_restore_rx_state(struct fm10k_intfc *interface)
{
	struct fm10k_l2_accel *l2_accel = interface->l2_accel;
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int xcast_mode, i;
	u16 vid, glort;

	/* record glort for this interface */
	glort = interface->glort;

	/* convert interface flags to xcast mode */
	if (netdev->flags & IFF_PROMISC)
		xcast_mode = FM10K_XCAST_MODE_PROMISC;
	else if (netdev->flags & IFF_ALLMULTI)
		xcast_mode = FM10K_XCAST_MODE_ALLMULTI;
	else if (netdev->flags & (IFF_BROADCAST | IFF_MULTICAST))
		xcast_mode = FM10K_XCAST_MODE_MULTI;
	else
		xcast_mode = FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(interface);

	/* Enable logical port if host's mailbox is ready */
	if (fm10k_host_mbx_ready(interface))
		hw->mac.ops.update_lport_state(hw, glort,
					       interface->glort_count, true);

	/* update VLAN table */
	fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL, 0,
				 xcast_mode == FM10K_XCAST_MODE_PROMISC);

	/* update table with current entries */
	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		fm10k_queue_vlan_request(interface, vid, 0, true);

		fm10k_queue_mac_request(interface, glort,
					hw->mac.addr, vid, true);

		/* synchronize macvlan addresses */
		if (l2_accel) {
			for (i = 0; i < l2_accel->size; i++) {
				struct net_device *sdev = l2_accel->macvlan[i];

				if (!sdev)
					continue;

				glort = l2_accel->dglort + 1 + i;

				fm10k_queue_mac_request(interface, glort,
							sdev->dev_addr,
							vid, true);
			}
		}
	}

	/* update xcast mode before synchronizing addresses if host's mailbox
	 * is ready
	 */
	if (fm10k_host_mbx_ready(interface))
		hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);

	/* synchronize all of the addresses */
	__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
	__dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);

	/* synchronize macvlan addresses */
	if (l2_accel) {
		for (i = 0; i < l2_accel->size; i++) {
			struct net_device *sdev = l2_accel->macvlan[i];

			if (!sdev)
				continue;

			glort = l2_accel->dglort + 1 + i;

			hw->mac.ops.update_xcast_mode(hw, glort,
						      FM10K_XCAST_MODE_NONE);
			fm10k_queue_mac_request(interface, glort,
						sdev->dev_addr,
						hw->mac.default_vid, true);
		}
	}

	fm10k_mbx_unlock(interface);

	/* record updated xcast mode state */
	interface->xcast_mode = xcast_mode;

	/* Restore tunnel configuration */
	fm10k_restore_udp_port_info(interface);
}

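/**
 * fm10k_reset_rx_state - Drop Rx filtering state ahead of a reset
 * @interface: board private structure
 *
 * Flush pending MAC/VLAN work, disable the logical port, and clear the
 * address sync state so it can be rebuilt by fm10k_restore_rx_state().
 **/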
void fm10k_reset_rx_state(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;

	/* Wait for MAC/VLAN work to finish */
	while (test_bit(__FM10K_MACVLAN_SCHED, interface->state))
		usleep_range(1000, 2000);

	/* Cancel pending MAC/VLAN requests */
	fm10k_clear_macvlan_queue(interface, interface->glort, true);

	fm10k_mbx_lock(interface);

	/* clear the logical port state on lower device if host's mailbox is
	 * ready
	 */
	if (fm10k_host_mbx_ready(interface))
		hw->mac.ops.update_lport_state(hw, interface->glort,
					       interface->glort_count, false);

	fm10k_mbx_unlock(interface);

	/* reset flags to default state */
	interface->xcast_mode = FM10K_XCAST_MODE_NONE;

	/* clear the sync flag since the lport has been dropped */
	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);
}

/**
 * fm10k_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: storage space for 64-bit statistics
 *
 * Obtain 64-bit statistics in a way that is safe for both 32-bit and 64-bit
 * architectures.
 */
static void fm10k_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *ring;
	unsigned int start, i;
	u64 bytes, packets;

	rcu_read_lock();

	for (i = 0; i < interface->num_rx_queues; i++) {
		ring = READ_ONCE(interface->rx_ring[i]);

		if (!ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	for (i = 0; i < interface->num_tx_queues; i++) {
		ring = READ_ONCE(interface->tx_ring[i]);

		if (!ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}

	rcu_read_unlock();

	/* following stats updated by fm10k_service_task() */
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
}

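/**
 * fm10k_setup_tc - Reconfigure the interface for a number of traffic classes
 * @dev: network interface device structure
 * @tc: number of traffic classes to enable
 *
 * The queueing scheme must be torn down and rebuilt around the new traffic
 * class count, so a running interface goes through a full close/open cycle.
 **/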
int fm10k_setup_tc(struct net_device *dev, u8 tc)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int err;

	/* Currently only the PF supports priority classes */
	if (tc && (interface->hw.mac.type != fm10k_mac_pf))
		return -EINVAL;

	/* Hardware supports up to 8 traffic classes */
	if (tc > 8)
		return -EINVAL;

	/* Hardware has to reinitialize queues to match packet
	 * buffer alignment. Unfortunately, the hardware is not
	 * flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		fm10k_close(dev);

	fm10k_mbx_free_irq(interface);

	fm10k_clear_queueing_scheme(interface);

	/* we expect the prio_tc map to be repopulated later */
	netdev_reset_tc(dev);
	netdev_set_num_tc(dev, tc);

	err = fm10k_init_queueing_scheme(interface);
	if (err)
		goto err_queueing_scheme;

	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_irq;

	err = netif_running(dev) ? fm10k_open(dev) : 0;
	if (err)
		goto err_open;

	/* flag to indicate SWPRI has yet to be updated */
	set_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);

	return 0;
err_open:
	fm10k_mbx_free_irq(interface);
err_mbx_irq:
	fm10k_clear_queueing_scheme(interface);
err_queueing_scheme:
	netif_device_detach(dev);

	return err;
}

static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type,
			    void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return fm10k_setup_tc(dev, mqprio->num_tc);
}

static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
				  struct fm10k_l2_accel *l2_accel)
{
	int i;

	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *ring = interface->rx_ring[i];

		rcu_assign_pointer(ring->l2_accel, l2_accel);
	}

	interface->l2_accel = l2_accel;
}

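/**
 * fm10k_dfwd_add_station - Offload a macvlan device (ndo_dfwd_add_station)
 * @dev: the lower network interface device
 * @sdev: the macvlan device to offload
 *
 * Reserve a GLORT for the macvlan device, growing the l2_accel table as
 * needed, and queue MAC filter updates for it on every active VLAN.
 **/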
static void *fm10k_dfwd_add_station(struct net_device *dev,
				    struct net_device *sdev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_l2_accel *l2_accel = interface->l2_accel;
	struct fm10k_l2_accel *old_l2_accel = NULL;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int size, i;
	u16 vid, glort;

	/* The hardware supported by fm10k only filters on the destination MAC
	 * address. In order to avoid issues we only support offloading modes
	 * where the hardware can actually provide the functionality.
	 */
	if (!macvlan_supports_dest_filter(sdev))
		return ERR_PTR(-EMEDIUMTYPE);

	/* allocate l2 accel structure if it is not available */
	if (!l2_accel) {
		/* verify there is enough free GLORTs to support l2_accel */
		if (interface->glort_count < 7)
			return ERR_PTR(-EBUSY);

		size = offsetof(struct fm10k_l2_accel, macvlan[7]);
		l2_accel = kzalloc(size, GFP_KERNEL);
		if (!l2_accel)
			return ERR_PTR(-ENOMEM);

		l2_accel->size = 7;
		l2_accel->dglort = interface->glort;

		/* update pointers */
		fm10k_assign_l2_accel(interface, l2_accel);
	/* do not expand if we are at our limit */
	} else if ((l2_accel->count == FM10K_MAX_STATIONS) ||
		   (l2_accel->count == (interface->glort_count - 1))) {
		return ERR_PTR(-EBUSY);
	/* expand if we have hit the size limit */
	} else if (l2_accel->count == l2_accel->size) {
		old_l2_accel = l2_accel;
		size = offsetof(struct fm10k_l2_accel,
				macvlan[(l2_accel->size * 2) + 1]);
		l2_accel = kzalloc(size, GFP_KERNEL);
		if (!l2_accel)
			return ERR_PTR(-ENOMEM);

		memcpy(l2_accel, old_l2_accel,
		       offsetof(struct fm10k_l2_accel,
				macvlan[old_l2_accel->size]));

		l2_accel->size = (old_l2_accel->size * 2) + 1;

		/* update pointers */
		fm10k_assign_l2_accel(interface, l2_accel);
		kfree_rcu(old_l2_accel, rcu);
	}

	/* add macvlan to accel table, and record GLORT for position */
	for (i = 0; i < l2_accel->size; i++) {
		if (!l2_accel->macvlan[i])
			break;
	}

	/* record station */
	l2_accel->macvlan[i] = sdev;
	l2_accel->count++;

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	dglort.glort = interface->glort;
	dglort.shared_l = fls(l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* Add rules for this specific dglort to the switch */
	fm10k_mbx_lock(interface);

	glort = l2_accel->dglort + 1 + i;

	if (fm10k_host_mbx_ready(interface))
		hw->mac.ops.update_xcast_mode(hw, glort,
					      FM10K_XCAST_MODE_NONE);

	fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
				hw->mac.default_vid, true);

	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid))
		fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
					vid, true);

	fm10k_mbx_unlock(interface);

	return sdev;
}

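/**
 * fm10k_dfwd_del_station - Stop offloading a macvlan device
 * @dev: the lower network interface device
 * @priv: the macvlan device returned by fm10k_dfwd_add_station()
 *
 * Remove the macvlan device's MAC filters, release its GLORT, and free the
 * l2_accel table once it is empty.
 **/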
static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_l2_accel *l2_accel = READ_ONCE(interface->l2_accel);
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	struct net_device *sdev = priv;
	u16 vid, glort;
	int i;

	if (!l2_accel)
		return;

	/* search table for matching interface */
	for (i = 0; i < l2_accel->size; i++) {
		if (l2_accel->macvlan[i] == sdev)
			break;
	}

	/* exit if macvlan not found */
	if (i == l2_accel->size)
		return;

	/* Remove any rules specific to this dglort */
	fm10k_mbx_lock(interface);

	glort = l2_accel->dglort + 1 + i;

	if (fm10k_host_mbx_ready(interface))
		hw->mac.ops.update_xcast_mode(hw, glort,
					      FM10K_XCAST_MODE_NONE);

	fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
				hw->mac.default_vid, false);

	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid))
		fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
					vid, false);

	fm10k_mbx_unlock(interface);

	/* record removal */
	l2_accel->macvlan[i] = NULL;
	l2_accel->count--;

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	dglort.glort = interface->glort;
	dglort.shared_l = fls(l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* If table is empty remove it */
	if (l2_accel->count == 0) {
		fm10k_assign_l2_accel(interface, NULL);
		kfree_rcu(l2_accel, rcu);
	}
}

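/* ndo_features_check callback: strip checksum and GSO offload features for
 * encapsulated frames that fm10k_tx_encap_offload() cannot offload.
 */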
static netdev_features_t fm10k_features_check(struct sk_buff *skb,
					      struct net_device *dev,
					      netdev_features_t features)
{
	if (!skb->encapsulation || fm10k_tx_encap_offload(skb))
		return features;

	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static const struct net_device_ops fm10k_netdev_ops = {
	.ndo_open		= fm10k_open,
	.ndo_stop		= fm10k_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= fm10k_xmit_frame,
	.ndo_set_mac_address	= fm10k_set_mac,
	.ndo_tx_timeout		= fm10k_tx_timeout,
	.ndo_vlan_rx_add_vid	= fm10k_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fm10k_vlan_rx_kill_vid,
	.ndo_set_rx_mode	= fm10k_set_rx_mode,
	.ndo_get_stats64	= fm10k_get_stats64,
	.ndo_setup_tc		= __fm10k_setup_tc,
	.ndo_set_vf_mac		= fm10k_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= fm10k_ndo_set_vf_vlan,
	.ndo_set_vf_rate	= fm10k_ndo_set_vf_bw,
	.ndo_get_vf_config	= fm10k_ndo_get_vf_config,
	.ndo_get_vf_stats	= fm10k_ndo_get_vf_stats,
	.ndo_dfwd_add_station	= fm10k_dfwd_add_station,
	.ndo_dfwd_del_station	= fm10k_dfwd_del_station,
	.ndo_features_check	= fm10k_features_check,
};

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

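/**
 * fm10k_alloc_netdev - Allocate and configure a net_device for an interface
 * @info: MAC information (PF or VF) used to gate feature flags
 *
 * Allocate the netdev with the maximum supported queue count, wire up the
 * netdev and ethtool ops, and set up the default feature flags. Returns
 * NULL on allocation failure.
 **/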
struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info)
{
	netdev_features_t hw_features;
	struct fm10k_intfc *interface;
	struct net_device *dev;

	dev = alloc_etherdev_mq(sizeof(struct fm10k_intfc), MAX_QUEUES);
	if (!dev)
		return NULL;

	/* set net device and ethtool ops */
	dev->netdev_ops = &fm10k_netdev_ops;
	fm10k_set_ethtool_ops(dev);

	/* configure default debug level */
	interface = netdev_priv(dev);
	interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	/* configure default features */
	dev->features |= NETIF_F_IP_CSUM |
			 NETIF_F_IPV6_CSUM |
			 NETIF_F_SG |
			 NETIF_F_TSO |
			 NETIF_F_TSO6 |
			 NETIF_F_TSO_ECN |
			 NETIF_F_RXHASH |
			 NETIF_F_RXCSUM;

	/* Only the PF can support VXLAN and NVGRE tunnel offloads */
	if (info->mac == fm10k_mac_pf) {
		dev->hw_enc_features = NETIF_F_IP_CSUM |
				       NETIF_F_TSO |
				       NETIF_F_TSO6 |
				       NETIF_F_TSO_ECN |
				       NETIF_F_GSO_UDP_TUNNEL |
				       NETIF_F_IPV6_CSUM |
				       NETIF_F_SG;

		dev->features |= NETIF_F_GSO_UDP_TUNNEL;

		dev->udp_tunnel_nic_info = &fm10k_udp_tunnels;
	}

	/* all features defined to this point should be changeable */
	hw_features = dev->features;

	/* allow user to enable L2 forwarding acceleration */
	hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;

	/* configure VLAN features */
	dev->vlan_features |= dev->features;

	/* we want to leave these both on as we cannot disable VLAN tag
	 * insertion or stripping on the hardware since it is contained
	 * in the FTAG and not in the frame itself.
	 */
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->hw_features |= hw_features;

	/* MTU range: 68 - 15342 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = FM10K_MAX_JUMBO_FRAME_SIZE;

	return dev;
}
