/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"

/* OOM task polling interval */
#define LIO_OOM_POLL_INTERVAL_MS 250

#define OCTNIC_MAX_SG  MAX_SKB_FRAGS
/**
 * lio_delete_glists - Delete gather lists
 * @lio: per-network private data
 */
void lio_delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	kfree(lio->glist_lock);
	lio->glist_lock = NULL;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->oct_dev->num_iqs; i++) {
		do {
			g = (struct octnic_gather *)
			    lio_list_delete_head(&lio->glist[i]);
			kfree(g);
		} while (g);

		if (lio->glists_virt_base && lio->glists_virt_base[i] &&
		    lio->glists_dma_base && lio->glists_dma_base[i]) {
			lio_dma_free(lio->oct_dev,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
		}
	}

	kfree(lio->glists_virt_base);
	lio->glists_virt_base = NULL;

	kfree(lio->glists_dma_base);
	lio->glists_dma_base = NULL;

	kfree(lio->glist);
	lio->glist = NULL;
}

/**
 * lio_setup_glists - Setup gather lists
 * @oct: octeon_device
 * @lio: per-network private data
 * @num_iqs: count of iqs to allocate
 */
int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	struct octnic_gather *g;
	int i, j;

	lio->glist_lock =
	    kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist =
	    kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

	/* allocate memory to store the virtual and dma base addresses of
	 * per-glist consistent memory
	 */
	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
					GFP_KERNEL);
	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
				       GFP_KERNEL);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		lio_delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = dev_to_node(&oct->pci_dev->dev);

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(oct,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}

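/**
 * liquidio_set_feature - Send a feature-control command to firmware
 * @netdev: network device
 * @cmd: control command to send
 * @param1: parameter for the command
 *
 * Return: 0 on success, negative error code on failure.
 */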
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = cmd;
	nctrl.ncmd.s.param1 = param1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
			ret);
		if (ret > 0)
			ret = -EIO;
	}
	return ret;
}

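/**
 * octeon_report_tx_completion_to_bql - Report completed packets/bytes to BQL
 * @txq: netdev_queue the packets were transmitted on
 * @pkts_compl: number of completed packets
 * @bytes_compl: number of completed bytes
 */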
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl)
{
	struct netdev_queue *netdev_queue = txq;

	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
}

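/**
 * octeon_update_tx_completion_counters - Count a completed Tx buffer
 * @buf: request buffer; a free-info struct or soft command per @reqtype
 * @reqtype: request type of @buf
 * @pkts_compl: incremented by one for the completed skb
 * @bytes_compl: incremented by the completed skb's length
 */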
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb = NULL;
	struct octeon_soft_command *sc;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return;
	}

	(*pkts_compl)++;
	*bytes_compl += skb->len;
}

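/**
 * octeon_report_sent_bytes_to_bql - Report an skb's bytes to BQL at send time
 * @buf: request buffer; a free-info struct or soft command per @reqtype
 * @reqtype: request type of @buf
 *
 * Return: non-zero if the skb's tx queue is stopped.
 */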
int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct octeon_soft_command *sc;
	struct netdev_queue *txq;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return 0;
	}

	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
	netdev_tx_sent_queue(txq, skb->len);

	return netif_xmit_stopped(txq);
}

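/**
 * liquidio_link_ctrl_cmd_completion - Log the result of a link control command
 * @nctrl_ptr: pointer to the completed octnic_ctrl_pkt
 */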
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
{
	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *mac;

	if (nctrl->sc_status)
		return;

	switch (nctrl->ncmd.s.cmd) {
	case OCTNET_CMD_CHANGE_DEVFLAGS:
	case OCTNET_CMD_SET_MULTI_LIST:
	case OCTNET_CMD_SET_UC_LIST:
		break;

	case OCTNET_CMD_CHANGE_MACADDR:
		mac = ((u8 *)&nctrl->udd[0]) + 2;
		if (nctrl->ncmd.s.param1) {
			/* vfidx is 0 based, but vf_num (param1) is 1 based */
			int vfidx = nctrl->ncmd.s.param1 - 1;
			bool mac_is_admin_assigned = nctrl->ncmd.s.param2;

			if (mac_is_admin_assigned)
				netif_info(lio, probe, lio->netdev,
					   "MAC Address %pM is configured for VF %d\n",
					   mac, vfidx);
		} else {
			netif_info(lio, probe, lio->netdev,
				   "MACAddr changed to %pM\n",
				   mac);
		}
		break;

	case OCTNET_CMD_GPIO_ACCESS:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
		break;

	case OCTNET_CMD_ID_ACTIVE:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
		break;

	case OCTNET_CMD_LRO_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
		break;

	case OCTNET_CMD_LRO_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VLAN_FILTER_CTL:
		if (nctrl->ncmd.s.param1)
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter enabled\n", netdev->name);
		else
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter disabled\n", netdev->name);
		break;

	case OCTNET_CMD_ADD_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_DEL_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_SET_SETTINGS:
		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
			 netdev->name);
		break;

	/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_RX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_RXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_TX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_TXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_VXLAN_PORT_CONFIG:
		if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d ADDED\n",
				   nctrl->ncmd.s.param1);
		} else if (nctrl->ncmd.s.more ==
			   OCTNET_CMD_VXLAN_PORT_DEL) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d DELETED\n",
				   nctrl->ncmd.s.param1);
		}
		break;

	case OCTNET_CMD_SET_FLOW_CTL:
		netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
		break;

	case OCTNET_CMD_QUEUE_COUNT_CTL:
		netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
			   nctrl->ncmd.s.param1);
		break;

	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
			nctrl->ncmd.s.cmd);
	}
}

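/**
 * octeon_pf_changed_vf_macaddr - Apply a PF-assigned MAC address to the VF netdev
 * @oct: octeon device
 * @mac: MAC address assigned by the PF
 */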
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
{
	bool macaddr_changed = false;
	struct net_device *netdev;
	struct lio *lio;

	rtnl_lock();

	netdev = oct->props[0].netdev;
	lio = GET_LIO(netdev);

	lio->linfo.macaddr_is_admin_asgnd = true;

	if (!ether_addr_equal(netdev->dev_addr, mac)) {
		macaddr_changed = true;
		eth_hw_addr_set(netdev, mac);
		ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
	}

	rtnl_unlock();

	if (macaddr_changed)
		dev_info(&oct->pci_dev->dev,
			 "PF changed VF's MAC address to %pM\n", mac);

	/* no need to notify the firmware of the macaddr change because
	 * the PF did that already
	 */
}

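/**
 * octeon_schedule_rxq_oom_work - Schedule delayed buffer-refill work for a droq
 * @oct: octeon device
 * @droq: output queue whose buffers need refilling
 */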
void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
				  struct octeon_droq *droq)
{
	struct net_device *netdev = oct->props[0].netdev;
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];

	queue_delayed_work(wq->wq, &wq->wk.work,
			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
}

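/**
 * octnet_poll_check_rxq_oom_status - Retry droq refill; reschedule on failure
 * @work: work struct embedded in the queue's cavium_wk
 */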
static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	int q_no = wk->ctxul;
	struct octeon_droq *droq = oct->droq[q_no];

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq)
		return;

	if (octeon_retry_droq_refill(droq))
		octeon_schedule_rxq_oom_work(oct, droq);
}

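/**
 * setup_rx_oom_poll_fn - Create per-rxq workqueues for OOM refill polling
 * @netdev: network device
 *
 * Return: 0 on success, -ENOMEM if a workqueue cannot be allocated.
 */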
int setup_rx_oom_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct cavium_wq *wq;
	int q, q_no;

	for (q = 0; q < oct->num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		wq = &lio->rxq_status_wq[q_no];
		wq->wq = alloc_workqueue("rxq-oom-status",
					 WQ_MEM_RECLAIM, 0);
		if (!wq->wq) {
			dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&wq->wk.work,
				  octnet_poll_check_rxq_oom_status);
		wq->wk.ctxptr = lio;
		wq->wk.ctxul = q_no;
	}

	return 0;
}

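/**
 * cleanup_rx_oom_poll_fn - Cancel and destroy the per-rxq OOM workqueues
 * @netdev: network device
 */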
void cleanup_rx_oom_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct cavium_wq *wq;
	int q_no;

	for (q_no = 0; q_no < oct->num_oqs; q_no++) {
		wq = &lio->rxq_status_wq[q_no];
		if (wq->wq) {
			cancel_delayed_work_sync(&wq->wk.work);
			destroy_workqueue(wq->wq);
			wq->wq = NULL;
		}
	}
}

/* Runs in interrupt context. */
static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
	struct net_device *netdev;
	struct lio *lio;

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (__netif_subqueue_stopped(netdev, iq->q_index) &&
	    lio->linfo.link.s.link_up &&
	    (!octnet_iq_is_full(oct, iq_num))) {
		netif_wake_subqueue(netdev, iq->q_index);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
					  tx_restart, 1);
	}
}

/**
 * octeon_setup_droq - Setup output queue
 * @oct: octeon device
 * @q_no: which queue
 * @num_descs: how many descriptors
 * @desc_size: size of each descriptor
 * @app_ctx: application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
			     int desc_size, void *app_ctx)
{
	int ret_val;

	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
	/* droq creation and local register settings. */
	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
	if (ret_val < 0)
		return ret_val;

	if (ret_val == 1) {
		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
		return 0;
	}

	/* Enable the droq queues */
	octeon_set_droq_pkt_op(oct, q_no, 1);

	/* Send Credit for Octeon Output queues. Credits are always
	 * sent after the output queue is enabled.
	 */
	writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);

	return ret_val;
}

/**
 * liquidio_push_packet - Routine to push packets arriving on Octeon interface up to the network layer.
 * @octeon_id: octeon device id.
 * @skbuff: skbuff struct to be passed to network layer.
 * @len: size of total data received.
 * @rh: Control header associated with the packet
 * @param: additional control data with the packet
 * @arg: farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 __maybe_unused octeon_id,
		     void *skbuff,
		     u32 len,
		     union octeon_rh *rh,
		     void *param,
		     void *arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq =
		container_of(param, struct octeon_droq, napi);
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct napi_struct *napi = param;
	u16 vtag = 0;
	u32 r_dh_off;
	u64 ns;

	if (netdev) {
		struct lio *lio = GET_LIO(netdev);
		struct octeon_device *oct = lio->oct_dev;

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			recv_buffer_free(skb);
			droq->stats.rx_dropped++;
			return;
		}

		skb->dev = netdev;

		skb_record_rx_queue(skb, droq->q_no);
		if (likely(len > MIN_SKB_SIZE)) {
			struct octeon_skb_page_info *pg_info;
			unsigned char *va;

			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
			if (pg_info->page) {
				/* For Paged allocation use the frags */
				va = page_address(pg_info->page) +
					pg_info->page_offset;
				memcpy(skb->data, va, MIN_SKB_SIZE);
				skb_put(skb, MIN_SKB_SIZE);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						pg_info->page,
						pg_info->page_offset +
							MIN_SKB_SIZE,
						len - MIN_SKB_SIZE,
						LIO_RXBUFFER_SZ);
			}
		} else {
			struct octeon_skb_page_info *pg_info =
				((struct octeon_skb_page_info *)(skb->cb));
			skb_copy_to_linear_data(skb, page_address(pg_info->page)
						+ pg_info->page_offset, len);
			skb_put(skb, len);
			put_page(pg_info->page);
		}

		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

		if (oct->ptp_enable) {
			if (rh->r_dh.has_hwtstamp) {
				/* timestamp is included from the hardware at
				 * the beginning of the packet.
				 */
				if (ifstate_check
					(lio,
					 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
					/* Nanoseconds are in the first 64-bits
					 * of the packet.
					 */
					memcpy(&ns, (skb->data + r_dh_off),
					       sizeof(ns));
					r_dh_off -= BYTES_PER_DHLEN_UNIT;
					shhwtstamps = skb_hwtstamps(skb);
					shhwtstamps->hwtstamp =
						ns_to_ktime(ns +
							    lio->ptp_adjust);
				}
			}
		}

		if (rh->r_dh.has_hash) {
			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
			u32 hash = be32_to_cpu(*hash_be);

			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
			r_dh_off -= BYTES_PER_DHLEN_UNIT;
		}

		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
		skb->protocol = eth_type_trans(skb, skb->dev);

		if ((netdev->features & NETIF_F_RXCSUM) &&
		    (((rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
		     (!(rh->r_dh.encap_on) &&
		      ((rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED) ==
			CNNIC_CSUM_VERIFIED))))
			/* checksum has already been verified */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		/* Setting Encapsulation field on basis of status received
		 * from the firmware
		 */
		if (rh->r_dh.encap_on) {
			skb->encapsulation = 1;
			skb->csum_level = 1;
			droq->stats.rx_vxlan++;
		}

		/* inbound VLAN tag */
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    rh->r_dh.vlan) {
			u16 priority = rh->r_dh.priority;
			u16 vid = rh->r_dh.vlan;

			vtag = (priority << VLAN_PRIO_SHIFT) | vid;
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
		}

		napi_gro_receive(napi, skb);

		droq->stats.rx_bytes_received += len -
			rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
		droq->stats.rx_pkts_received++;
	} else {
		recv_buffer_free(skb);
	}
}

/**
 * napi_schedule_wrapper - wrapper for calling napi_schedule
 * @param: parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}

/**
 * liquidio_napi_drv_callback - callback when receive interrupt occurs and we are in NAPI mode
 * @arg: pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
	struct octeon_device *oct;
	struct octeon_droq *droq = arg;
	int this_cpu = smp_processor_id();

	oct = droq->oct_dev;

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
	    droq->cpu_id == this_cpu) {
		napi_schedule_irqoff(&droq->napi);
	} else {
		INIT_CSD(&droq->csd, napi_schedule_wrapper, &droq->napi);
		smp_call_function_single_async(droq->cpu_id, &droq->csd);
	}
}

/**
 * liquidio_napi_poll - Entry point for NAPI polling
 * @napi: NAPI structure
 * @budget: maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_instr_queue *iq;
	struct octeon_device *oct;
	struct octeon_droq *droq;
	int tx_done = 0, iq_no;
	int work_done;

	droq = container_of(napi, struct octeon_droq, napi);
	oct = droq->oct_dev;
	iq_no = droq->q_no;

	/* Handle Droq descriptors */
	work_done = octeon_droq_process_poll_pkts(oct, droq, budget);

	/* Flush the instruction queue */
	iq = oct->instr_queue[iq_no];
	if (iq) {
		/* TODO: move this check to inside octeon_flush_iq,
		 * once check_db_timeout is removed
		 */
		if (atomic_read(&iq->instr_pending))
			/* Process iq buffers within the budget limits */
			tx_done = octeon_flush_iq(oct, iq, budget);
		else
			tx_done = 1;
		/* Update iq read-index rather than waiting for the next
		 * interrupt. Return budget if tx_done is false.
		 */
		/* sub-queue status update */
		lio_update_txq_status(oct, iq_no);
	} else {
		dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
			__func__, iq_no);
	}

#define MAX_REG_CNT  2000000U
	/* force enable interrupt if reg cnts are high to avoid wraparound */
	if ((work_done < budget && tx_done) ||
	    (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
	    (droq->pkt_count >= MAX_REG_CNT)) {
		napi_complete_done(napi, work_done);

		octeon_enable_irq(droq->oct_dev, droq->q_no);
		return 0;
	}

	return (!tx_done) ? (budget) : (work_done);
}

/**
 * liquidio_setup_io_queues - Setup input and output queues
 * @octeon_dev: octeon device
 * @ifidx: Interface index
 * @num_iqs: input io queue count
 * @num_oqs: output io queue count
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
	struct octeon_droq *droq;
	struct napi_struct *napi;
	int cpu_id_modulus;
	int num_tx_descs;
	struct lio *lio;
	int retval = 0;
	int q, q_no;
	int cpu_id;

	netdev = octeon_dev->props[ifidx].netdev;

	lio = GET_LIO(netdev);

	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

	droq_ops.fptr = liquidio_push_packet;
	droq_ops.farg = netdev;

	droq_ops.poll_mode = 1;
	droq_ops.napi_fn = liquidio_napi_drv_callback;
	cpu_id = 0;
	cpu_id_modulus = num_present_cpus();

	/* set up DROQs. */
	for (q = 0; q < num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"%s index:%d linfo.rxpciq.s.q_no:%d\n",
			__func__, q, q_no);
		retval = octeon_setup_droq(
		    octeon_dev, q_no,
		    CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
						lio->ifidx),
		    CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
						   lio->ifidx),
		    NULL);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"%s : Runtime DROQ(RxQ) creation failed.\n",
				__func__);
			return 1;
		}

		droq = octeon_dev->droq[q_no];
		napi = &droq->napi;
		dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
			(u64)netdev, (u64)octeon_dev);
		netif_napi_add(netdev, napi, liquidio_napi_poll);

		/* designate a CPU for this droq */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
		/* 23XX PF/VF can send/recv control messages (via the first
		 * PF/VF-owned droq) from the firmware even if the ethX
		 * interface is down, so that's why poll_mode must be off
		 * for the first droq.
		 */
		octeon_dev->droq[0]->ops.poll_mode = 0;
	}

	/* set up IQs. */
	for (q = 0; q < num_iqs; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
		    octeon_get_conf(octeon_dev), lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
					 lio->linfo.txpciq[q], num_tx_descs,
					 netdev_get_tx_queue(netdev, q));
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime IQ(TxQ) creation failed.\n",
				__func__);
			return 1;
		}

		/* XPS */
		if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
		    octeon_dev->ioq_vector) {
			struct octeon_ioq_vector *ioq_vector;

			ioq_vector = &octeon_dev->ioq_vector[q];
			netif_set_xps_queue(netdev,
					    &ioq_vector->affinity_mask,
					    ioq_vector->iq_index);
		}
	}

	return 0;
}

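/**
 * liquidio_schedule_msix_droq_pkt_handler - Dispatch droq work for an MSI-X interrupt
 * @droq: output queue that raised the interrupt
 * @ret: status returned by the MSI-X interrupt handler
 *
 * Runs NAPI if the queue is in poll mode, else schedules the droq tasklet.
 */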
static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
	struct octeon_device *oct = droq->oct_dev;
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;

	if (droq->ops.poll_mode) {
		droq->ops.napi_fn(droq);
	} else {
		if (ret & MSIX_PO_INT) {
			if (OCTEON_CN23XX_VF(oct))
				dev_err(&oct->pci_dev->dev,
					"should not get rx interrupts when poll mode = 0 for vf\n");
			tasklet_schedule(&oct_priv->droq_tasklet);
			return 1;
		}
		/* this will be flushed periodically by check iq db */
		if (ret & MSIX_PI_INT)
			return 0;
	}

	return 0;
}

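/**
 * liquidio_msix_intr_handler - MSI-X interrupt handler for a queue pair
 * @irq: unused
 * @dev: pointer to the queue's octeon_ioq_vector
 */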
irqreturn_t
liquidio_msix_intr_handler(int __maybe_unused irq, void *dev)
{
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
	u64 ret;

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
		liquidio_schedule_msix_droq_pkt_handler(droq, ret);

	return IRQ_HANDLED;
}

/**
 * liquidio_schedule_droq_pkt_handlers - Droq packet processor scheduler
 * @oct: octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct octeon_droq *droq;
	u64 oq_no;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & BIT_ULL(oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= BIT_ULL(oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}

/**
 * liquidio_legacy_intr_handler - Interrupt handler for octeon
 * @irq: unused
 * @dev: octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int __maybe_unused irq, void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	irqreturn_t ret;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	ret = oct->fn_list.process_interrupt_regs(oct);

	if (ret == IRQ_HANDLED)
		liquidio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return ret;
}

/**
 * octeon_setup_interrupt - Setup interrupt for octeon device
 * @oct: octeon device
 * @num_ioqs: number of queues
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
{
	struct msix_entry *msix_entries;
	char *queue_irq_names = NULL;
	int i, num_interrupts = 0;
	int num_alloc_ioq_vectors;
	char *aux_irq_name = NULL;
	int num_ioq_vectors;
	int irqret, err;

	if (oct->msix_on) {
		oct->num_msix_irqs = num_ioqs;
		if (OCTEON_CN23XX_PF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;

			/* one non-ioq interrupt for handling
			 * sli_mac_pf_int_sum
			 */
			oct->num_msix_irqs += 1;
		} else if (OCTEON_CN23XX_VF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
		}

		/* allocate storage for the names assigned to each irq */
		oct->irq_name_storage =
			kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage) {
			dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
			return -ENOMEM;
		}

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			aux_irq_name = &queue_irq_names
				[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];

		oct->msix_entries = kcalloc(oct->num_msix_irqs,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!oct->msix_entries) {
			dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return -ENOMEM;
		}

		msix_entries = (struct msix_entry *)oct->msix_entries;

		/* Assumption is that PF MSI-X vector numbers run from pf_srn
		 * through trs, rather than from 0; change this code if that
		 * is not the case.
		 */
		if (OCTEON_CN23XX_PF(oct)) {
			for (i = 0; i < oct->num_msix_irqs - 1; i++)
				msix_entries[i].entry =
					oct->sriov_info.pf_srn + i;

			msix_entries[oct->num_msix_irqs - 1].entry =
				oct->sriov_info.trs;
		} else if (OCTEON_CN23XX_VF(oct)) {
			for (i = 0; i < oct->num_msix_irqs; i++)
				msix_entries[i].entry = i;
		}
		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "unable to allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return num_alloc_ioq_vectors;
		}

		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;
		/* For PF, there is one non-ioq interrupt handler */
		if (OCTEON_CN23XX_PF(oct)) {
			num_ioq_vectors -= 1;

			snprintf(aux_irq_name, INTRNAMSIZ,
				 "LiquidIO%u-pf%u-aux", oct->octeon_id,
				 oct->pf_num);
			irqret = request_irq(
					msix_entries[num_ioq_vectors].vector,
					liquidio_legacy_intr_handler, 0,
					aux_irq_name, oct);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
		}
		for (i = 0 ; i < num_ioq_vectors ; i++) {
			if (OCTEON_CN23XX_PF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
					 oct->octeon_id, oct->pf_num, i);

			if (OCTEON_CN23XX_VF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
					 oct->octeon_id, oct->vf_num, i);

			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     &queue_irq_names[IRQ_NAME_OFF(i)],
					     &oct->ioq_vector[i]);

			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/* Free the non-ioq irq vector here. */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);

				while (i) {
					i--;
					/* clear the affinity mask */
					irq_set_affinity_hint(
						msix_entries[i].vector,
						NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(msix_entries[i].vector,
					      &oct->ioq_vector[i].affinity_mask
					      );
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		/* allocate storage for the names assigned to the irq */
		oct->irq_name_storage = kzalloc(INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage)
			return -ENOMEM;

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-pf%u-rxtx-%u",
				 oct->octeon_id, oct->pf_num, 0);

		if (OCTEON_CN23XX_VF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-vf%u-rxtx-%u",
				 oct->octeon_id, oct->vf_num, 0);

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler,
				     IRQF_SHARED,
				     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
				irqret);
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return irqret;
		}
	}
	return 0;
}

/**
 * liquidio_change_mtu - Net device change_mtu
 * @netdev: network device
 * @new_mtu: the new max transmit unit size
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int ret = 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate soft command\n");
		return -ENOMEM;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
	ncmd->s.param1 = new_mtu;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
		octeon_free_soft_command(oct, sc);
		return -EINVAL;
	}
	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed out.
	 */
	ret = wait_for_sc_completion_timeout(oct, sc, 0);
	if (ret)
		return ret;

	if (sc->sc_status) {
		WRITE_ONCE(sc->caller_is_done, true);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;
	lio->mtu = new_mtu;

	WRITE_ONCE(sc->caller_is_done, true);
	return 0;
}

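/**
 * lio_wait_for_clean_oq - Wait for all output queues to drain
 * @oct: octeon device
 *
 * Return: number of packets still pending when the retries run out.
 */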
int lio_wait_for_clean_oq(struct octeon_device *oct)
{
	int retry = 100, pending_pkts = 0;
	int idx;

	do {
		pending_pkts = 0;

		for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
			if (!(oct->io_qmask.oq & BIT_ULL(idx)))
				continue;
			pending_pkts +=
				atomic_read(&oct->droq[idx]->pkts_pending);
		}

		if (pending_pkts > 0)
			schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pending_pkts;
}

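/**
 * octnet_nic_stats_callback - Copy firmware port statistics into the device
 * @oct_dev: octeon device
 * @status: completion status of the soft command
 * @ptr: soft command carrying the statistics response
 */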
static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp =
		(struct oct_nic_stats_resp *)sc->virtrptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;
		rstats->red_drops = rsp_rstats->red_drops;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
		rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that are LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that are LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times LRO of a packet was aborted */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision */
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collisions */
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* Accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* Total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
		tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_pki = rsp_tstats->fw_err_pki;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		resp->status = 1;
	} else {
		dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
		resp->status = -1;
	}
}

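/**
 * lio_fetch_vf_stats - Query firmware for VF stats and warn on spoofed packets
 * @lio: per-network private data
 *
 * Return: 0 on success, negative or send/wait error code on failure.
 */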
static int lio_fetch_vf_stats(struct lio *lio)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_nic_vf_stats_resp *resp;

	int retval;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_vf_stats_resp),
					  0);

	if (!sc) {
		dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
		retval = -ENOMEM;
		goto lio_fetch_vf_stats_exit;
	}

	resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_VF_PORT_STATS, 0, 0, 0);

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		goto lio_fetch_vf_stats_exit;
	}

	retval =
		wait_for_sc_completion_timeout(oct_dev, sc,
					       (2 * LIO_SC_MAX_TMO_MS));
	if (retval) {
		dev_err(&oct_dev->pci_dev->dev,
			"sc OPCODE_NIC_VF_PORT_STATS command failed\n");
		goto lio_fetch_vf_stats_exit;
	}

	if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
		octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt,
				    (sizeof(u64)) >> 3);

		if (resp->spoofmac_cnt != 0) {
			dev_warn(&oct_dev->pci_dev->dev,
				 "%llu Spoofed packets detected\n",
				 resp->spoofmac_cnt);
		}
	}
	WRITE_ONCE(sc->caller_is_done, 1);

lio_fetch_vf_stats_exit:
	return retval;
}

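/**
 * lio_fetch_stats - Periodic work to pull port statistics from firmware
 * @work: work struct embedded in lio->stats_wk
 *
 * Reschedules itself while the interface is in the RUNNING state.
 */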
void lio_fetch_stats(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = wk->ctxptr;
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_nic_stats_resp *resp;
	unsigned long time_in_jiffies;
	int retval;

	if (OCTEON_CN23XX_PF(oct_dev)) {
		/* report spoofchk every 2 seconds */
		if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) &&
		    (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) &&
		    oct_dev->sriov_info.num_vfs_alloced) {
			lio_fetch_vf_stats(lio);
		}

		oct_dev->vfstats_poll++;
	}

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_stats_resp),
					  0);

	if (!sc) {
		dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
		goto lio_fetch_stats_exit;
	}

	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_stats_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_PORT_STATS, 0, 0, 0);

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		goto lio_fetch_stats_exit;
	}

	retval = wait_for_sc_completion_timeout(oct_dev, sc,
						(2 * LIO_SC_MAX_TMO_MS));
	if (retval) {
		dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
		goto lio_fetch_stats_exit;
	}

	octnet_nic_stats_callback(oct_dev, sc->sc_status, sc);
	WRITE_ONCE(sc->caller_is_done, true);

lio_fetch_stats_exit:
	time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS);
	if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
		schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies);
}

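/**
 * liquidio_set_speed - Request a link speed change from firmware (PF only)
 * @lio: per-network private data
 * @speed: requested link speed
 *
 * Return: 0 on success, negative error code on failure.
 */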
int liquidio_set_speed(struct lio *lio, int speed)
{
	struct octeon_device *oct = lio->oct_dev;
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int retval;
	u32 var;

	if (oct->speed_setting == speed)
		return 0;

	if (!OCTEON_CN23XX_PF(oct)) {
		dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
			__func__);
		return -EOPNOTSUPP;
	}

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp),
				       0);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
	ncmd->s.param1 = speed;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
		retval = -EBUSY;
	} else {
		/* Wait for response or timeout */
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return retval;

		retval = resp->status;

		if (retval) {
			dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
				__func__, retval);
			WRITE_ONCE(sc->caller_is_done, true);

			return -EIO;
		}

		var = be32_to_cpu((__force __be32)resp->speed);
		if (var != speed) {
			dev_err(&oct->pci_dev->dev,
				"%s: setting failed speed= %x, expect %x\n",
				__func__, var, speed);
		}

		oct->speed_setting = var;
		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}

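/**
 * liquidio_get_speed - Read the current link speed setting from firmware
 * @lio: per-network private data
 *
 * Return: 0 on success, negative error code on failure.
 */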
int liquidio_get_speed(struct lio *lio)
{
	struct octeon_device *oct = lio->oct_dev;
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int retval;

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp),
				       0);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_SPEED_GET;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
		retval = -EIO;
	} else {
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return retval;

		retval = resp->status;
		if (retval) {
			dev_err(&oct->pci_dev->dev,
				"%s failed retval=%d\n", __func__, retval);
			retval = -EIO;
		} else {
			u32 var;

			var = be32_to_cpu((__force __be32)resp->speed);
			oct->speed_setting = var;
			if (var == 0xffff) {
				/* unable to access boot variables;
				 * get the default value based on the NIC type
				 */
				if (oct->subsystem_id ==
				    OCTEON_CN2350_25GB_SUBSYS_ID ||
				    oct->subsystem_id ==
				    OCTEON_CN2360_25GB_SUBSYS_ID) {
					oct->no_speed_setting = 1;
					oct->speed_setting = 25;
				} else {
					oct->speed_setting = 10;
				}
			}
		}
		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}

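/**
 * liquidio_set_fec - Enable or disable FEC via firmware (PF only, 25G links)
 * @lio: per-network private data
 * @on_off: SEAPI_CMD_FEC_DISABLE (0) or SEAPI_CMD_FEC_RS (1)
 *
 * Return: 0 on success, negative on failure.
 */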
int liquidio_set_fec(struct lio *lio, int on_off)
{
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	struct octeon_device *oct;
	union octnet_cmd *ncmd;
	int retval;
	u32 var;

	oct = lio->oct_dev;

	if (oct->props[lio->ifidx].fec == on_off)
		return 0;

	if (!OCTEON_CN23XX_PF(oct)) {
		dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n",
			__func__);
		return -1;
	}

	if (oct->speed_boot != 25) {
		dev_err(&oct->pci_dev->dev,
			"Set FEC only when link speed is 25G during insmod\n");
		return -1;
	}

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp), 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to allocate soft command\n");
		return -ENOMEM;
	}

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_FEC_SET;
	ncmd->s.param1 = on_off;
	/* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
		return -EIO;
	}

	retval = wait_for_sc_completion_timeout(oct, sc, 0);
	if (retval)
		return (-EIO);

	var = be32_to_cpu(resp->fec_setting);
	resp->fec_setting = var;
	if (var != on_off) {
		dev_err(&oct->pci_dev->dev,
			"Setting failed fec= %x, expect %x\n",
			var, on_off);
		oct->props[lio->ifidx].fec = var;
		if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
			oct->props[lio->ifidx].fec = 1;
		else
			oct->props[lio->ifidx].fec = 0;
	}

	WRITE_ONCE(sc->caller_is_done, true);

	if (oct->props[lio->ifidx].fec !=
	    oct->props[lio->ifidx].fec_boot) {
		dev_dbg(&oct->pci_dev->dev,
			"Reload driver to change fec to %s\n",
			oct->props[lio->ifidx].fec ? "on" : "off");
	}

	return retval;
}

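/**
 * liquidio_get_fec - Read the current FEC setting from firmware
 * @lio: per-network private data
 *
 * Return: 0 on success, negative on failure.
 */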
int liquidio_get_fec(struct lio *lio)
{
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	struct octeon_device *oct;
	union octnet_cmd *ncmd;
	int retval;
	u32 var;

	oct = lio->oct_dev;

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp), 0);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_FEC_GET;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev,
			 "%s: Failed to send soft command\n", __func__);
		octeon_free_soft_command(oct, sc);
		return -EIO;
	}

	retval = wait_for_sc_completion_timeout(oct, sc, 0);
	if (retval)
		return retval;

	var = be32_to_cpu(resp->fec_setting);
	resp->fec_setting = var;
	if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
		oct->props[lio->ifidx].fec = 1;
	else
		oct->props[lio->ifidx].fec = 0;

	WRITE_ONCE(sc->caller_is_done, true);

	if (oct->props[lio->ifidx].fec !=
	    oct->props[lio->ifidx].fec_boot) {
		dev_dbg(&oct->pci_dev->dev,
			"Reload driver to change fec to %s\n",
			oct->props[lio->ifidx].fec ? "on" : "off");
	}

	return retval;
}