1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23
24 MODULE_VERSION(DRV_VER);
25 MODULE_DEVICE_TABLE(pci, be_dev_ids);
26 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27 MODULE_AUTHOR("ServerEngines Corporation");
28 MODULE_LICENSE("GPL");
29
30 static unsigned int num_vfs;
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
33
34 static ushort rx_frag_size = 2048;
35 module_param(rx_frag_size, ushort, S_IRUGO);
36 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
38 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
39 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
41 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
43 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
46 { 0 }
47 };
48 MODULE_DEVICE_TABLE(pci, be_dev_ids);
49 /* UE Status Low CSR */
50 static const char * const ue_status_low_desc[] = {
51 "CEV",
52 "CTX",
53 "DBUF",
54 "ERX",
55 "Host",
56 "MPU",
57 "NDMA",
58 "PTC ",
59 "RDMA ",
60 "RXF ",
61 "RXIPS ",
62 "RXULP0 ",
63 "RXULP1 ",
64 "RXULP2 ",
65 "TIM ",
66 "TPOST ",
67 "TPRE ",
68 "TXIPS ",
69 "TXULP0 ",
70 "TXULP1 ",
71 "UC ",
72 "WDMA ",
73 "TXULP2 ",
74 "HOST1 ",
75 "P0_OB_LINK ",
76 "P1_OB_LINK ",
77 "HOST_GPIO ",
78 "MBOX ",
79 "AXGMAC0",
80 "AXGMAC1",
81 "JTAG",
82 "MPU_INTPEND"
83 };
84 /* UE Status High CSR */
85 static const char * const ue_status_hi_desc[] = {
86 "LPCMEMHOST",
87 "MGMT_MAC",
88 "PCS0ONLINE",
89 "MPU_IRAM",
90 "PCS1ONLINE",
91 "PCTL0",
92 "PCTL1",
93 "PMEM",
94 "RR",
95 "TXPB",
96 "RXPP",
97 "XAUI",
98 "TXP",
99 "ARM",
100 "IPC",
101 "HOST2",
102 "HOST3",
103 "HOST4",
104 "HOST5",
105 "HOST6",
106 "HOST7",
107 "HOST8",
108 "HOST9",
109 "NETC",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown"
118 };
119
120 /* Is BE in a multi-channel mode */
121 static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125 }
126
127 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128 {
129 struct be_dma_mem *mem = &q->dma_mem;
130 if (mem->va) {
131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
133 mem->va = NULL;
134 }
135 }
136
137 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139 {
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
148 if (!mem->va)
149 return -ENOMEM;
150 memset(mem->va, 0, mem->size);
151 return 0;
152 }
153
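/* Enable/disable host interrupt generation by toggling the HOSTINTR bit of
 * the MEMBAR interrupt-control register in PCI config space. The write is
 * skipped when the bit already matches the requested state or when an EEH
 * error is pending.
 */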
154 static void be_intr_set(struct be_adapter *adapter, bool enable)
155 {
156 u32 reg, enabled;
157
158 if (adapter->eeh_err)
159 return;
160
161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
165 if (!enabled && enable)
166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 else if (enabled && !enable)
168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
169 else
170 return;
171
172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
174 }
175
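/* Doorbell helpers: post the queue id and the number of newly added entries
 * to the RQ/TXQ doorbell registers. The wmb() orders the descriptor writes
 * before the doorbell write.
 */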
176 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
177 {
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
181
182 wmb();
183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
184 }
185
186 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
187 {
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
191
192 wmb();
193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
194 }
195
196 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
197 bool arm, bool clear_int, u16 num_popped)
198 {
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
203
204 if (adapter->eeh_err)
205 return;
206
207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
214 }
215
216 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
217 {
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
222
223 if (adapter->eeh_err)
224 return;
225
226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
230 }
231
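/* Set the interface MAC address. The new pmac entry is added before the old
 * one is deleted, so the interface always has a programmed MAC.
 */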
232 static int be_mac_addr_set(struct net_device *netdev, void *p)
233 {
234 struct be_adapter *adapter = netdev_priv(netdev);
235 struct sockaddr *addr = p;
236 int status = 0;
237 u8 current_mac[ETH_ALEN];
238 u32 pmac_id = adapter->pmac_id[0];
239
240 if (!is_valid_ether_addr(addr->sa_data))
241 return -EADDRNOTAVAIL;
242
243 status = be_cmd_mac_addr_query(adapter, current_mac,
244 MAC_ADDRESS_TYPE_NETWORK, false,
245 adapter->if_handle, 0);
246 if (status)
247 goto err;
248
249 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
250 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
251 adapter->if_handle, &adapter->pmac_id[0], 0);
252 if (status)
253 goto err;
254
255 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
256 }
257 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
258 return 0;
259 err:
260 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
261 return status;
262 }
263
264 static void populate_be2_stats(struct be_adapter *adapter)
265 {
266 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
269 struct be_port_rxf_stats_v0 *port_stats =
270 &rxf_stats->port[adapter->port_num];
271 struct be_drv_stats *drvs = &adapter->drv_stats;
272
273 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
274 drvs->rx_pause_frames = port_stats->rx_pause_frames;
275 drvs->rx_crc_errors = port_stats->rx_crc_errors;
276 drvs->rx_control_frames = port_stats->rx_control_frames;
277 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
288 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
289 drvs->rx_dropped_header_too_small =
290 port_stats->rx_dropped_header_too_small;
291 drvs->rx_address_mismatch_drops =
292 port_stats->rx_address_mismatch_drops +
293 port_stats->rx_vlan_mismatch_drops;
294 drvs->rx_alignment_symbol_errors =
295 port_stats->rx_alignment_symbol_errors;
296
297 drvs->tx_pauseframes = port_stats->tx_pauseframes;
298 drvs->tx_controlframes = port_stats->tx_controlframes;
299
300 if (adapter->port_num)
301 drvs->jabber_events = rxf_stats->port1_jabber_events;
302 else
303 drvs->jabber_events = rxf_stats->port0_jabber_events;
304 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
305 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
306 drvs->forwarded_packets = rxf_stats->forwarded_packets;
307 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
308 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
310 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311 }
312
313 static void populate_be3_stats(struct be_adapter *adapter)
314 {
315 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
317 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
318 struct be_port_rxf_stats_v1 *port_stats =
319 &rxf_stats->port[adapter->port_num];
320 struct be_drv_stats *drvs = &adapter->drv_stats;
321
322 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
323 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
324 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
325 drvs->rx_pause_frames = port_stats->rx_pause_frames;
326 drvs->rx_crc_errors = port_stats->rx_crc_errors;
327 drvs->rx_control_frames = port_stats->rx_control_frames;
328 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
329 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
330 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
331 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
332 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
333 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
334 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
335 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
336 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
337 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
338 drvs->rx_dropped_header_too_small =
339 port_stats->rx_dropped_header_too_small;
340 drvs->rx_input_fifo_overflow_drop =
341 port_stats->rx_input_fifo_overflow_drop;
342 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
343 drvs->rx_alignment_symbol_errors =
344 port_stats->rx_alignment_symbol_errors;
345 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
346 drvs->tx_pauseframes = port_stats->tx_pauseframes;
347 drvs->tx_controlframes = port_stats->tx_controlframes;
348 drvs->jabber_events = port_stats->jabber_events;
349 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
350 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
351 drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
353 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
355 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356 }
357
358 static void populate_lancer_stats(struct be_adapter *adapter)
359 {
360
361 struct be_drv_stats *drvs = &adapter->drv_stats;
362 struct lancer_pport_stats *pport_stats =
363 pport_stats_from_cmd(adapter);
364
365 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
366 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
367 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
368 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
369 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
370 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
371 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
372 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
373 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
374 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
375 drvs->rx_dropped_tcp_length =
376 pport_stats->rx_dropped_invalid_tcp_length;
377 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
378 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
379 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
380 drvs->rx_dropped_header_too_small =
381 pport_stats->rx_dropped_header_too_small;
382 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
383 drvs->rx_address_mismatch_drops =
384 pport_stats->rx_address_mismatch_drops +
385 pport_stats->rx_vlan_mismatch_drops;
386 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
387 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
388 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
389 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
390 drvs->jabber_events = pport_stats->rx_jabbers;
391 drvs->forwarded_packets = pport_stats->num_forwards_lo;
392 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
393 drvs->rx_drops_too_many_frags =
394 pport_stats->rx_drops_too_many_frags_lo;
395 }
396
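/* Accumulate a 16-bit HW counter into a 32-bit SW counter; when the HW value
 * has wrapped past 65535, add a full wrap to the accumulated value.
 */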
397 static void accumulate_16bit_val(u32 *acc, u16 val)
398 {
399 #define lo(x) (x & 0xFFFF)
400 #define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407 }
408
409 void be_parse_stats(struct be_adapter *adapter)
410 {
411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
421 populate_be2_stats(adapter);
422 }
423
424 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
425 for_all_rx_queues(adapter, rxo, i) {
426 /* below erx HW counter can actually wrap around after
427 * 65535. Driver accumulates a 32-bit value
428 */
429 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
430 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
431 }
432 }
433
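/* Aggregate the per-RX/TX-queue SW counters (read consistently using the
 * u64_stats seqcount) and the HW error counters parsed into drv_stats.
 */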
434 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
435 struct rtnl_link_stats64 *stats)
436 {
437 struct be_adapter *adapter = netdev_priv(netdev);
438 struct be_drv_stats *drvs = &adapter->drv_stats;
439 struct be_rx_obj *rxo;
440 struct be_tx_obj *txo;
441 u64 pkts, bytes;
442 unsigned int start;
443 int i;
444
445 for_all_rx_queues(adapter, rxo, i) {
446 const struct be_rx_stats *rx_stats = rx_stats(rxo);
447 do {
448 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
449 pkts = rx_stats(rxo)->rx_pkts;
450 bytes = rx_stats(rxo)->rx_bytes;
451 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
452 stats->rx_packets += pkts;
453 stats->rx_bytes += bytes;
454 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
455 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
456 rx_stats(rxo)->rx_drops_no_frags;
457 }
458
459 for_all_tx_queues(adapter, txo, i) {
460 const struct be_tx_stats *tx_stats = tx_stats(txo);
461 do {
462 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
463 pkts = tx_stats(txo)->tx_pkts;
464 bytes = tx_stats(txo)->tx_bytes;
465 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
466 stats->tx_packets += pkts;
467 stats->tx_bytes += bytes;
468 }
469
470 /* bad pkts received */
471 stats->rx_errors = drvs->rx_crc_errors +
472 drvs->rx_alignment_symbol_errors +
473 drvs->rx_in_range_errors +
474 drvs->rx_out_range_errors +
475 drvs->rx_frame_too_long +
476 drvs->rx_dropped_too_small +
477 drvs->rx_dropped_too_short +
478 drvs->rx_dropped_header_too_small +
479 drvs->rx_dropped_tcp_length +
480 drvs->rx_dropped_runt;
481
482 /* detailed rx errors */
483 stats->rx_length_errors = drvs->rx_in_range_errors +
484 drvs->rx_out_range_errors +
485 drvs->rx_frame_too_long;
486
487 stats->rx_crc_errors = drvs->rx_crc_errors;
488
489 /* frame alignment errors */
490 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
491
492 /* receiver fifo overrun */
493 /* drops_no_pbuf is not per i/f, it's per BE card */
494 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
495 drvs->rx_input_fifo_overflow_drop +
496 drvs->rx_drops_no_pbuf;
497 return stats;
498 }
499
500 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
501 {
502 struct net_device *netdev = adapter->netdev;
503
504 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
505 netif_carrier_off(netdev);
506 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
507 }
508
509 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
510 netif_carrier_on(netdev);
511 else
512 netif_carrier_off(netdev);
513 }
514
515 static void be_tx_stats_update(struct be_tx_obj *txo,
516 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
517 {
518 struct be_tx_stats *stats = tx_stats(txo);
519
520 u64_stats_update_begin(&stats->sync);
521 stats->tx_reqs++;
522 stats->tx_wrbs += wrb_cnt;
523 stats->tx_bytes += copied;
524 stats->tx_pkts += (gso_segs ? gso_segs : 1);
525 if (stopped)
526 stats->tx_stops++;
527 u64_stats_update_end(&stats->sync);
528 }
529
530 /* Determine number of WRB entries needed to xmit data in an skb */
531 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
532 bool *dummy)
533 {
534 int cnt = (skb->len > skb->data_len);
535
536 cnt += skb_shinfo(skb)->nr_frags;
537
538 /* to account for hdr wrb */
539 cnt++;
540 if (lancer_chip(adapter) || !(cnt & 1)) {
541 *dummy = false;
542 } else {
543 /* add a dummy to make it an even num */
544 cnt++;
545 *dummy = true;
546 }
547 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
548 return cnt;
549 }
550
551 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
552 {
553 wrb->frag_pa_hi = upper_32_bits(addr);
554 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
555 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
556 }
557
558 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
559 struct sk_buff *skb)
560 {
561 u8 vlan_prio;
562 u16 vlan_tag;
563
564 vlan_tag = vlan_tx_tag_get(skb);
565 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
566 /* If vlan priority provided by OS is NOT in available bmap */
567 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
568 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
569 adapter->recommended_prio;
570
571 return vlan_tag;
572 }
573
574 static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
575 {
576 return vlan_tx_tag_present(skb) || adapter->pvid;
577 }
578
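/* Fill the header WRB that precedes the data WRBs: LSO/MSS, checksum-offload
 * and VLAN fields, the number of WRBs in this request and the frame length.
 */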
579 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
580 struct sk_buff *skb, u32 wrb_cnt, u32 len)
581 {
582 u16 vlan_tag;
583
584 memset(hdr, 0, sizeof(*hdr));
585
586 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
587
588 if (skb_is_gso(skb)) {
589 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
590 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
591 hdr, skb_shinfo(skb)->gso_size);
592 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
593 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
594 if (lancer_chip(adapter) && adapter->sli_family ==
595 LANCER_A0_SLI_FAMILY) {
596 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
597 if (is_tcp_pkt(skb))
598 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
599 tcpcs, hdr, 1);
600 else if (is_udp_pkt(skb))
601 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
602 udpcs, hdr, 1);
603 }
604 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
605 if (is_tcp_pkt(skb))
606 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
607 else if (is_udp_pkt(skb))
608 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
609 }
610
611 if (vlan_tx_tag_present(skb)) {
612 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
613 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
614 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
615 }
616
617 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
618 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
619 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
620 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
621 }
622
623 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
624 bool unmap_single)
625 {
626 dma_addr_t dma;
627
628 be_dws_le_to_cpu(wrb, sizeof(*wrb));
629
630 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
631 if (wrb->frag_len) {
632 if (unmap_single)
633 dma_unmap_single(dev, dma, wrb->frag_len,
634 DMA_TO_DEVICE);
635 else
636 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
637 }
638 }
639
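/* DMA-map the skb head and page frags and fill one WRB per mapping, plus an
 * optional dummy WRB to make the WRB count even. On a mapping error, rewind
 * the queue head and unmap whatever was mapped; returns the number of bytes
 * mapped, or 0 on failure.
 */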
640 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
641 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
642 {
643 dma_addr_t busaddr;
644 int i, copied = 0;
645 struct device *dev = &adapter->pdev->dev;
646 struct sk_buff *first_skb = skb;
647 struct be_eth_wrb *wrb;
648 struct be_eth_hdr_wrb *hdr;
649 bool map_single = false;
650 u16 map_head;
651
652 hdr = queue_head_node(txq);
653 queue_head_inc(txq);
654 map_head = txq->head;
655
656 if (skb->len > skb->data_len) {
657 int len = skb_headlen(skb);
658 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
659 if (dma_mapping_error(dev, busaddr))
660 goto dma_err;
661 map_single = true;
662 wrb = queue_head_node(txq);
663 wrb_fill(wrb, busaddr, len);
664 be_dws_cpu_to_le(wrb, sizeof(*wrb));
665 queue_head_inc(txq);
666 copied += len;
667 }
668
669 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
670 const struct skb_frag_struct *frag =
671 &skb_shinfo(skb)->frags[i];
672 busaddr = skb_frag_dma_map(dev, frag, 0,
673 skb_frag_size(frag), DMA_TO_DEVICE);
674 if (dma_mapping_error(dev, busaddr))
675 goto dma_err;
676 wrb = queue_head_node(txq);
677 wrb_fill(wrb, busaddr, skb_frag_size(frag));
678 be_dws_cpu_to_le(wrb, sizeof(*wrb));
679 queue_head_inc(txq);
680 copied += skb_frag_size(frag);
681 }
682
683 if (dummy_wrb) {
684 wrb = queue_head_node(txq);
685 wrb_fill(wrb, 0, 0);
686 be_dws_cpu_to_le(wrb, sizeof(*wrb));
687 queue_head_inc(txq);
688 }
689
690 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
691 be_dws_cpu_to_le(hdr, sizeof(*hdr));
692
693 return copied;
694 dma_err:
695 txq->head = map_head;
696 while (copied) {
697 wrb = queue_head_node(txq);
698 unmap_tx_frag(dev, wrb, map_single);
699 map_single = false;
700 copied -= wrb->frag_len;
701 queue_head_inc(txq);
702 }
703 return 0;
704 }
705
706 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
707 struct sk_buff *skb)
708 {
709 u16 vlan_tag = 0;
710
711 skb = skb_share_check(skb, GFP_ATOMIC);
712 if (unlikely(!skb))
713 return skb;
714
715 if (vlan_tx_tag_present(skb)) {
716 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
717 __vlan_put_tag(skb, vlan_tag);
718 skb->vlan_tci = 0;
719 }
720
721 return skb;
722 }
723
724 static netdev_tx_t be_xmit(struct sk_buff *skb,
725 struct net_device *netdev)
726 {
727 struct be_adapter *adapter = netdev_priv(netdev);
728 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
729 struct be_queue_info *txq = &txo->q;
730 struct iphdr *ip = NULL;
731 u32 wrb_cnt = 0, copied = 0;
732 u32 start = txq->head, eth_hdr_len;
733 bool dummy_wrb, stopped = false;
734
735 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
736 VLAN_ETH_HLEN : ETH_HLEN;
737
738 /* HW has a bug which considers padding bytes as legal
739 * and modifies the IPv4 hdr's 'tot_len' field
740 */
741 if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
742 is_ipv4_pkt(skb)) {
743 ip = (struct iphdr *)ip_hdr(skb);
744 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
745 }
746
747 /* HW has a bug wherein it will calculate CSUM for VLAN
748 * pkts even though it is disabled.
749 * Manually insert VLAN in pkt.
750 */
751 if (skb->ip_summed != CHECKSUM_PARTIAL &&
752 be_vlan_tag_chk(adapter, skb)) {
753 skb = be_insert_vlan_in_pkt(adapter, skb);
754 if (unlikely(!skb))
755 goto tx_drop;
756 }
757
758 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
759
760 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
761 if (copied) {
762 int gso_segs = skb_shinfo(skb)->gso_segs;
763
764 /* record the sent skb in the sent_skb table */
765 BUG_ON(txo->sent_skb_list[start]);
766 txo->sent_skb_list[start] = skb;
767
768 /* Ensure txq has space for the next skb; Else stop the queue
769 * *BEFORE* ringing the tx doorbell, so that we serialize the
770 * tx compls of the current transmit which'll wake up the queue
771 */
772 atomic_add(wrb_cnt, &txq->used);
773 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
774 txq->len) {
775 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
776 stopped = true;
777 }
778
779 be_txq_notify(adapter, txq->id, wrb_cnt);
780
781 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
782 } else {
783 txq->head = start;
784 dev_kfree_skb_any(skb);
785 }
786 tx_drop:
787 return NETDEV_TX_OK;
788 }
789
790 static int be_change_mtu(struct net_device *netdev, int new_mtu)
791 {
792 struct be_adapter *adapter = netdev_priv(netdev);
793 if (new_mtu < BE_MIN_MTU ||
794 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
795 (ETH_HLEN + ETH_FCS_LEN))) {
796 dev_info(&adapter->pdev->dev,
797 "MTU must be between %d and %d bytes\n",
798 BE_MIN_MTU,
799 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
800 return -EINVAL;
801 }
802 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
803 netdev->mtu, new_mtu);
804 netdev->mtu = new_mtu;
805 return 0;
806 }
807
808 /*
809 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
810 * If the user configures more, place BE in vlan promiscuous mode.
811 */
812 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
813 {
814 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
815 u16 vtag[BE_NUM_VLANS_SUPPORTED];
816 u16 ntags = 0, i;
817 int status = 0;
818
819 if (vf) {
820 vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
821 status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
822 1, 1, 0);
823 }
824
825 /* No need to further configure vids if in promiscuous mode */
826 if (adapter->promiscuous)
827 return 0;
828
829 if (adapter->vlans_added <= adapter->max_vlans) {
830 /* Construct VLAN Table to give to HW */
831 for (i = 0; i < VLAN_N_VID; i++) {
832 if (adapter->vlan_tag[i]) {
833 vtag[ntags] = cpu_to_le16(i);
834 ntags++;
835 }
836 }
837 status = be_cmd_vlan_config(adapter, adapter->if_handle,
838 vtag, ntags, 1, 0);
839 } else {
840 status = be_cmd_vlan_config(adapter, adapter->if_handle,
841 NULL, 0, 1, 1);
842 }
843
844 return status;
845 }
846
847 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
848 {
849 struct be_adapter *adapter = netdev_priv(netdev);
850 int status = 0;
851
852 if (!be_physfn(adapter)) {
853 status = -EINVAL;
854 goto ret;
855 }
856
857 adapter->vlan_tag[vid] = 1;
858 if (adapter->vlans_added <= (adapter->max_vlans + 1))
859 status = be_vid_config(adapter, false, 0);
860
861 if (!status)
862 adapter->vlans_added++;
863 else
864 adapter->vlan_tag[vid] = 0;
865 ret:
866 return status;
867 }
868
869 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
870 {
871 struct be_adapter *adapter = netdev_priv(netdev);
872 int status = 0;
873
874 if (!be_physfn(adapter)) {
875 status = -EINVAL;
876 goto ret;
877 }
878
879 adapter->vlan_tag[vid] = 0;
880 if (adapter->vlans_added <= adapter->max_vlans)
881 status = be_vid_config(adapter, false, 0);
882
883 if (!status)
884 adapter->vlans_added--;
885 else
886 adapter->vlan_tag[vid] = 1;
887 ret:
888 return status;
889 }
890
891 static void be_set_rx_mode(struct net_device *netdev)
892 {
893 struct be_adapter *adapter = netdev_priv(netdev);
894
895 if (netdev->flags & IFF_PROMISC) {
896 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
897 adapter->promiscuous = true;
898 goto done;
899 }
900
901 /* BE was previously in promiscuous mode; disable it */
902 if (adapter->promiscuous) {
903 adapter->promiscuous = false;
904 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
905
906 if (adapter->vlans_added)
907 be_vid_config(adapter, false, 0);
908 }
909
910 /* Enable multicast promisc if num configured exceeds what we support */
911 if (netdev->flags & IFF_ALLMULTI ||
912 netdev_mc_count(netdev) > BE_MAX_MC) {
913 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
914 goto done;
915 }
916
917 if (netdev_uc_count(netdev) != adapter->uc_macs) {
918 struct netdev_hw_addr *ha;
919 int i = 1; /* First slot is claimed by the Primary MAC */
920
921 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
922 be_cmd_pmac_del(adapter, adapter->if_handle,
923 adapter->pmac_id[i], 0);
924 }
925
926 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
927 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
928 adapter->promiscuous = true;
929 goto done;
930 }
931
932 netdev_for_each_uc_addr(ha, adapter->netdev) {
933 adapter->uc_macs++; /* First slot is for Primary MAC */
934 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
935 adapter->if_handle,
936 &adapter->pmac_id[adapter->uc_macs], 0);
937 }
938 }
939
940 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
941 done:
942 return;
943 }
944
945 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
946 {
947 struct be_adapter *adapter = netdev_priv(netdev);
948 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
949 int status;
950
951 if (!sriov_enabled(adapter))
952 return -EPERM;
953
954 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
955 return -EINVAL;
956
957 if (lancer_chip(adapter)) {
958 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
959 } else {
960 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
961 vf_cfg->pmac_id, vf + 1);
962
963 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
964 &vf_cfg->pmac_id, vf + 1);
965 }
966
967 if (status)
968 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
969 mac, vf);
970 else
971 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
972
973 return status;
974 }
975
976 static int be_get_vf_config(struct net_device *netdev, int vf,
977 struct ifla_vf_info *vi)
978 {
979 struct be_adapter *adapter = netdev_priv(netdev);
980 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
981
982 if (!sriov_enabled(adapter))
983 return -EPERM;
984
985 if (vf >= adapter->num_vfs)
986 return -EINVAL;
987
988 vi->vf = vf;
989 vi->tx_rate = vf_cfg->tx_rate;
990 vi->vlan = vf_cfg->vlan_tag;
991 vi->qos = 0;
992 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
993
994 return 0;
995 }
996
997 static int be_set_vf_vlan(struct net_device *netdev,
998 int vf, u16 vlan, u8 qos)
999 {
1000 struct be_adapter *adapter = netdev_priv(netdev);
1001 int status = 0;
1002
1003 if (!sriov_enabled(adapter))
1004 return -EPERM;
1005
1006 if (vf >= adapter->num_vfs || vlan > 4095)
1007 return -EINVAL;
1008
1009 if (vlan) {
1010 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1011 /* If this is new value, program it. Else skip. */
1012 adapter->vf_cfg[vf].vlan_tag = vlan;
1013
1014 status = be_cmd_set_hsw_config(adapter, vlan,
1015 vf + 1, adapter->vf_cfg[vf].if_handle);
1016 }
1017 } else {
1018 /* Reset Transparent Vlan Tagging. */
1019 adapter->vf_cfg[vf].vlan_tag = 0;
1020 vlan = adapter->vf_cfg[vf].def_vid;
1021 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1022 adapter->vf_cfg[vf].if_handle);
1023 }
1024
1025
1026 if (status)
1027 dev_info(&adapter->pdev->dev,
1028 "VLAN %d config on VF %d failed\n", vlan, vf);
1029 return status;
1030 }
1031
1032 static int be_set_vf_tx_rate(struct net_device *netdev,
1033 int vf, int rate)
1034 {
1035 struct be_adapter *adapter = netdev_priv(netdev);
1036 int status = 0;
1037
1038 if (!sriov_enabled(adapter))
1039 return -EPERM;
1040
1041 if (vf >= adapter->num_vfs)
1042 return -EINVAL;
1043
1044 if (rate < 100 || rate > 10000) {
1045 dev_err(&adapter->pdev->dev,
1046 "tx rate must be between 100 and 10000 Mbps\n");
1047 return -EINVAL;
1048 }
1049
1050 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1051
1052 if (status)
1053 dev_err(&adapter->pdev->dev,
1054 "tx rate %d on VF %d failed\n", rate, vf);
1055 else
1056 adapter->vf_cfg[vf].tx_rate = rate;
1057 return status;
1058 }
1059
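/* Adaptive interrupt coalescing: once a second, recompute the EQ delay from
 * the observed RX packet rate and program it if it has changed.
 */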
1060 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1061 {
1062 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1063 ulong now = jiffies;
1064 ulong delta = now - stats->rx_jiffies;
1065 u64 pkts;
1066 unsigned int start, eqd;
1067
1068 if (!eqo->enable_aic) {
1069 eqd = eqo->eqd;
1070 goto modify_eqd;
1071 }
1072
1073 if (eqo->idx >= adapter->num_rx_qs)
1074 return;
1075
1076 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1077
1078 /* Wrapped around */
1079 if (time_before(now, stats->rx_jiffies)) {
1080 stats->rx_jiffies = now;
1081 return;
1082 }
1083
1084 /* Update once a second */
1085 if (delta < HZ)
1086 return;
1087
1088 do {
1089 start = u64_stats_fetch_begin_bh(&stats->sync);
1090 pkts = stats->rx_pkts;
1091 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1092
1093 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1094 stats->rx_pkts_prev = pkts;
1095 stats->rx_jiffies = now;
1096 eqd = (stats->rx_pps / 110000) << 3;
1097 eqd = min(eqd, eqo->max_eqd);
1098 eqd = max(eqd, eqo->min_eqd);
1099 if (eqd < 10)
1100 eqd = 0;
1101
1102 modify_eqd:
1103 if (eqd != eqo->cur_eqd) {
1104 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1105 eqo->cur_eqd = eqd;
1106 }
1107 }
1108
1109 static void be_rx_stats_update(struct be_rx_obj *rxo,
1110 struct be_rx_compl_info *rxcp)
1111 {
1112 struct be_rx_stats *stats = rx_stats(rxo);
1113
1114 u64_stats_update_begin(&stats->sync);
1115 stats->rx_compl++;
1116 stats->rx_bytes += rxcp->pkt_size;
1117 stats->rx_pkts++;
1118 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1119 stats->rx_mcast_pkts++;
1120 if (rxcp->err)
1121 stats->rx_compl_err++;
1122 u64_stats_update_end(&stats->sync);
1123 }
1124
1125 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1126 {
1127 /* L4 checksum is not reliable for non TCP/UDP packets.
1128 * Also ignore ipcksm for ipv6 pkts */
1129 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1130 (rxcp->ip_csum || rxcp->ipv6);
1131 }
1132
1133 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1134 u16 frag_idx)
1135 {
1136 struct be_adapter *adapter = rxo->adapter;
1137 struct be_rx_page_info *rx_page_info;
1138 struct be_queue_info *rxq = &rxo->q;
1139
1140 rx_page_info = &rxo->page_info_tbl[frag_idx];
1141 BUG_ON(!rx_page_info->page);
1142
1143 if (rx_page_info->last_page_user) {
1144 dma_unmap_page(&adapter->pdev->dev,
1145 dma_unmap_addr(rx_page_info, bus),
1146 adapter->big_page_size, DMA_FROM_DEVICE);
1147 rx_page_info->last_page_user = false;
1148 }
1149
1150 atomic_dec(&rxq->used);
1151 return rx_page_info;
1152 }
1153
1154 /* Throw away the data in the Rx completion */
1155 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1156 struct be_rx_compl_info *rxcp)
1157 {
1158 struct be_queue_info *rxq = &rxo->q;
1159 struct be_rx_page_info *page_info;
1160 u16 i, num_rcvd = rxcp->num_rcvd;
1161
1162 for (i = 0; i < num_rcvd; i++) {
1163 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1164 put_page(page_info->page);
1165 memset(page_info, 0, sizeof(*page_info));
1166 index_inc(&rxcp->rxq_idx, rxq->len);
1167 }
1168 }
1169
1170 /*
1171 * skb_fill_rx_data forms a complete skb for an ether frame
1172 * indicated by rxcp.
1173 */
1174 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1175 struct be_rx_compl_info *rxcp)
1176 {
1177 struct be_queue_info *rxq = &rxo->q;
1178 struct be_rx_page_info *page_info;
1179 u16 i, j;
1180 u16 hdr_len, curr_frag_len, remaining;
1181 u8 *start;
1182
1183 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1184 start = page_address(page_info->page) + page_info->page_offset;
1185 prefetch(start);
1186
1187 /* Copy data in the first descriptor of this completion */
1188 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1189
1190 /* Copy the header portion into skb_data */
1191 hdr_len = min(BE_HDR_LEN, curr_frag_len);
1192 memcpy(skb->data, start, hdr_len);
1193 skb->len = curr_frag_len;
1194 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1195 /* Complete packet has now been moved to data */
1196 put_page(page_info->page);
1197 skb->data_len = 0;
1198 skb->tail += curr_frag_len;
1199 } else {
1200 skb_shinfo(skb)->nr_frags = 1;
1201 skb_frag_set_page(skb, 0, page_info->page);
1202 skb_shinfo(skb)->frags[0].page_offset =
1203 page_info->page_offset + hdr_len;
1204 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1205 skb->data_len = curr_frag_len - hdr_len;
1206 skb->truesize += rx_frag_size;
1207 skb->tail += hdr_len;
1208 }
1209 page_info->page = NULL;
1210
1211 if (rxcp->pkt_size <= rx_frag_size) {
1212 BUG_ON(rxcp->num_rcvd != 1);
1213 return;
1214 }
1215
1216 /* More frags present for this completion */
1217 index_inc(&rxcp->rxq_idx, rxq->len);
1218 remaining = rxcp->pkt_size - curr_frag_len;
1219 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1220 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1221 curr_frag_len = min(remaining, rx_frag_size);
1222
1223 /* Coalesce all frags from the same physical page in one slot */
1224 if (page_info->page_offset == 0) {
1225 /* Fresh page */
1226 j++;
1227 skb_frag_set_page(skb, j, page_info->page);
1228 skb_shinfo(skb)->frags[j].page_offset =
1229 page_info->page_offset;
1230 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1231 skb_shinfo(skb)->nr_frags++;
1232 } else {
1233 put_page(page_info->page);
1234 }
1235
1236 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1237 skb->len += curr_frag_len;
1238 skb->data_len += curr_frag_len;
1239 skb->truesize += rx_frag_size;
1240 remaining -= curr_frag_len;
1241 index_inc(&rxcp->rxq_idx, rxq->len);
1242 page_info->page = NULL;
1243 }
1244 BUG_ON(j > MAX_SKB_FRAGS);
1245 }
1246
1247 /* Process the RX completion indicated by rxcp when GRO is disabled */
1248 static void be_rx_compl_process(struct be_rx_obj *rxo,
1249 struct be_rx_compl_info *rxcp)
1250 {
1251 struct be_adapter *adapter = rxo->adapter;
1252 struct net_device *netdev = adapter->netdev;
1253 struct sk_buff *skb;
1254
1255 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1256 if (unlikely(!skb)) {
1257 rx_stats(rxo)->rx_drops_no_skbs++;
1258 be_rx_compl_discard(rxo, rxcp);
1259 return;
1260 }
1261
1262 skb_fill_rx_data(rxo, skb, rxcp);
1263
1264 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1265 skb->ip_summed = CHECKSUM_UNNECESSARY;
1266 else
1267 skb_checksum_none_assert(skb);
1268
1269 skb->protocol = eth_type_trans(skb, netdev);
1270 if (netdev->features & NETIF_F_RXHASH)
1271 skb->rxhash = rxcp->rss_hash;
1272
1273
1274 if (rxcp->vlanf)
1275 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1276
1277 netif_receive_skb(skb);
1278 }
1279
1280 /* Process the RX completion indicated by rxcp when GRO is enabled */
1281 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1282 struct be_rx_compl_info *rxcp)
1283 {
1284 struct be_adapter *adapter = rxo->adapter;
1285 struct be_rx_page_info *page_info;
1286 struct sk_buff *skb = NULL;
1287 struct be_queue_info *rxq = &rxo->q;
1288 u16 remaining, curr_frag_len;
1289 u16 i, j;
1290
1291 skb = napi_get_frags(napi);
1292 if (!skb) {
1293 be_rx_compl_discard(rxo, rxcp);
1294 return;
1295 }
1296
1297 remaining = rxcp->pkt_size;
1298 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1299 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1300
1301 curr_frag_len = min(remaining, rx_frag_size);
1302
1303 /* Coalesce all frags from the same physical page in one slot */
1304 if (i == 0 || page_info->page_offset == 0) {
1305 /* First frag or Fresh page */
1306 j++;
1307 skb_frag_set_page(skb, j, page_info->page);
1308 skb_shinfo(skb)->frags[j].page_offset =
1309 page_info->page_offset;
1310 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1311 } else {
1312 put_page(page_info->page);
1313 }
1314 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1315 skb->truesize += rx_frag_size;
1316 remaining -= curr_frag_len;
1317 index_inc(&rxcp->rxq_idx, rxq->len);
1318 memset(page_info, 0, sizeof(*page_info));
1319 }
1320 BUG_ON(j > MAX_SKB_FRAGS);
1321
1322 skb_shinfo(skb)->nr_frags = j + 1;
1323 skb->len = rxcp->pkt_size;
1324 skb->data_len = rxcp->pkt_size;
1325 skb->ip_summed = CHECKSUM_UNNECESSARY;
1326 if (adapter->netdev->features & NETIF_F_RXHASH)
1327 skb->rxhash = rxcp->rss_hash;
1328
1329 if (rxcp->vlanf)
1330 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1331
1332 napi_gro_frags(napi);
1333 }
1334
1335 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1336 struct be_rx_compl_info *rxcp)
1337 {
1338 rxcp->pkt_size =
1339 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1340 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1341 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1342 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1343 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1344 rxcp->ip_csum =
1345 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1346 rxcp->l4_csum =
1347 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1348 rxcp->ipv6 =
1349 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1350 rxcp->rxq_idx =
1351 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1352 rxcp->num_rcvd =
1353 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1354 rxcp->pkt_type =
1355 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1356 rxcp->rss_hash =
1357 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
1358 if (rxcp->vlanf) {
1359 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1360 compl);
1361 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1362 compl);
1363 }
1364 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1365 }
1366
1367 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1368 struct be_rx_compl_info *rxcp)
1369 {
1370 rxcp->pkt_size =
1371 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1372 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1373 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1374 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1375 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1376 rxcp->ip_csum =
1377 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1378 rxcp->l4_csum =
1379 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1380 rxcp->ipv6 =
1381 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1382 rxcp->rxq_idx =
1383 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1384 rxcp->num_rcvd =
1385 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1386 rxcp->pkt_type =
1387 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1388 rxcp->rss_hash =
1389 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
1390 if (rxcp->vlanf) {
1391 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1392 compl);
1393 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1394 compl);
1395 }
1396 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1397 }
1398
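/* Return the next valid RX completion or NULL. The completion is converted
 * to CPU byte order, parsed into rxo->rxcp using the v0/v1 layout, its valid
 * bit is cleared and the CQ tail is advanced.
 */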
1399 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1400 {
1401 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1402 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1403 struct be_adapter *adapter = rxo->adapter;
1404
1405 /* For checking the valid bit it is Ok to use either definition as the
1406 * valid bit is at the same position in both v0 and v1 Rx compl */
1407 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1408 return NULL;
1409
1410 rmb();
1411 be_dws_le_to_cpu(compl, sizeof(*compl));
1412
1413 if (adapter->be3_native)
1414 be_parse_rx_compl_v1(compl, rxcp);
1415 else
1416 be_parse_rx_compl_v0(compl, rxcp);
1417
1418 if (rxcp->vlanf) {
1419 /* vlanf could be wrongly set in some cards.
1420 * ignore if vtm is not set */
1421 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1422 rxcp->vlanf = 0;
1423
1424 if (!lancer_chip(adapter))
1425 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1426
1427 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1428 !adapter->vlan_tag[rxcp->vlan_tag])
1429 rxcp->vlanf = 0;
1430 }
1431
1432 /* As the compl has been parsed, reset it; we won't touch it again */
1433 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1434
1435 queue_tail_inc(&rxo->cq);
1436 return rxcp;
1437 }
1438
1439 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1440 {
1441 u32 order = get_order(size);
1442
1443 if (order > 0)
1444 gfp |= __GFP_COMP;
1445 return alloc_pages(gfp, order);
1446 }
1447
1448 /*
1449 * Allocate a page, split it to fragments of size rx_frag_size and post as
1450 * receive buffers to BE
1451 */
1452 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1453 {
1454 struct be_adapter *adapter = rxo->adapter;
1455 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1456 struct be_queue_info *rxq = &rxo->q;
1457 struct page *pagep = NULL;
1458 struct be_eth_rx_d *rxd;
1459 u64 page_dmaaddr = 0, frag_dmaaddr;
1460 u32 posted, page_offset = 0;
1461
1462 page_info = &rxo->page_info_tbl[rxq->head];
1463 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1464 if (!pagep) {
1465 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1466 if (unlikely(!pagep)) {
1467 rx_stats(rxo)->rx_post_fail++;
1468 break;
1469 }
1470 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1471 0, adapter->big_page_size,
1472 DMA_FROM_DEVICE);
1473 page_info->page_offset = 0;
1474 } else {
1475 get_page(pagep);
1476 page_info->page_offset = page_offset + rx_frag_size;
1477 }
1478 page_offset = page_info->page_offset;
1479 page_info->page = pagep;
1480 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1481 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1482
1483 rxd = queue_head_node(rxq);
1484 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1485 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1486
1487 /* Any space left in the current big page for another frag? */
1488 if ((page_offset + rx_frag_size + rx_frag_size) >
1489 adapter->big_page_size) {
1490 pagep = NULL;
1491 page_info->last_page_user = true;
1492 }
1493
1494 prev_page_info = page_info;
1495 queue_head_inc(rxq);
1496 page_info = &rxo->page_info_tbl[rxq->head];
1497 }
1498 if (pagep)
1499 prev_page_info->last_page_user = true;
1500
1501 if (posted) {
1502 atomic_add(posted, &rxq->used);
1503 be_rxq_notify(adapter, rxq->id, posted);
1504 } else if (atomic_read(&rxq->used) == 0) {
1505 /* Let be_worker replenish when memory is available */
1506 rxo->rx_post_starved = true;
1507 }
1508 }
1509
1510 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1511 {
1512 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1513
1514 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1515 return NULL;
1516
1517 rmb();
1518 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1519
1520 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1521
1522 queue_tail_inc(tx_cq);
1523 return txcp;
1524 }
1525
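/* Reclaim one transmitted skb: walk its WRBs up to last_index, unmap each
 * fragment, free the skb and return the number of WRBs (including the header
 * WRB) that can be released from the TX queue.
 */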
1526 static u16 be_tx_compl_process(struct be_adapter *adapter,
1527 struct be_tx_obj *txo, u16 last_index)
1528 {
1529 struct be_queue_info *txq = &txo->q;
1530 struct be_eth_wrb *wrb;
1531 struct sk_buff **sent_skbs = txo->sent_skb_list;
1532 struct sk_buff *sent_skb;
1533 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1534 bool unmap_skb_hdr = true;
1535
1536 sent_skb = sent_skbs[txq->tail];
1537 BUG_ON(!sent_skb);
1538 sent_skbs[txq->tail] = NULL;
1539
1540 /* skip header wrb */
1541 queue_tail_inc(txq);
1542
1543 do {
1544 cur_index = txq->tail;
1545 wrb = queue_tail_node(txq);
1546 unmap_tx_frag(&adapter->pdev->dev, wrb,
1547 (unmap_skb_hdr && skb_headlen(sent_skb)));
1548 unmap_skb_hdr = false;
1549
1550 num_wrbs++;
1551 queue_tail_inc(txq);
1552 } while (cur_index != last_index);
1553
1554 kfree_skb(sent_skb);
1555 return num_wrbs;
1556 }
1557
1558 /* Return the number of events in the event queue */
1559 static inline int events_get(struct be_eq_obj *eqo)
1560 {
1561 struct be_eq_entry *eqe;
1562 int num = 0;
1563
1564 do {
1565 eqe = queue_tail_node(&eqo->q);
1566 if (eqe->evt == 0)
1567 break;
1568
1569 rmb();
1570 eqe->evt = 0;
1571 num++;
1572 queue_tail_inc(&eqo->q);
1573 } while (true);
1574
1575 return num;
1576 }
1577
1578 static int event_handle(struct be_eq_obj *eqo)
1579 {
1580 bool rearm = false;
1581 int num = events_get(eqo);
1582
1583 /* Deal with any spurious interrupts that come without events */
1584 if (!num)
1585 rearm = true;
1586
1587 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1588 if (num)
1589 napi_schedule(&eqo->napi);
1590
1591 return num;
1592 }
1593
1594 /* Leaves the EQ in a disarmed state */
1595 static void be_eq_clean(struct be_eq_obj *eqo)
1596 {
1597 int num = events_get(eqo);
1598
1599 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1600 }
1601
1602 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1603 {
1604 struct be_rx_page_info *page_info;
1605 struct be_queue_info *rxq = &rxo->q;
1606 struct be_queue_info *rx_cq = &rxo->cq;
1607 struct be_rx_compl_info *rxcp;
1608 u16 tail;
1609
1610 /* First cleanup pending rx completions */
1611 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1612 be_rx_compl_discard(rxo, rxcp);
1613 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1614 }
1615
1616 /* Then free posted rx buffers that were not used */
1617 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1618 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1619 page_info = get_rx_page_info(rxo, tail);
1620 put_page(page_info->page);
1621 memset(page_info, 0, sizeof(*page_info));
1622 }
1623 BUG_ON(atomic_read(&rxq->used));
1624 rxq->tail = rxq->head = 0;
1625 }
1626
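/* Used at teardown: wait up to 200ms for outstanding TX completions, then
 * forcibly unmap and free any skbs whose completions never arrived.
 */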
1627 static void be_tx_compl_clean(struct be_adapter *adapter)
1628 {
1629 struct be_tx_obj *txo;
1630 struct be_queue_info *txq;
1631 struct be_eth_tx_compl *txcp;
1632 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1633 struct sk_buff *sent_skb;
1634 bool dummy_wrb;
1635 int i, pending_txqs;
1636
1637 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1638 do {
1639 pending_txqs = adapter->num_tx_qs;
1640
1641 for_all_tx_queues(adapter, txo, i) {
1642 txq = &txo->q;
1643 while ((txcp = be_tx_compl_get(&txo->cq))) {
1644 end_idx =
1645 AMAP_GET_BITS(struct amap_eth_tx_compl,
1646 wrb_index, txcp);
1647 num_wrbs += be_tx_compl_process(adapter, txo,
1648 end_idx);
1649 cmpl++;
1650 }
1651 if (cmpl) {
1652 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1653 atomic_sub(num_wrbs, &txq->used);
1654 cmpl = 0;
1655 num_wrbs = 0;
1656 }
1657 if (atomic_read(&txq->used) == 0)
1658 pending_txqs--;
1659 }
1660
1661 if (pending_txqs == 0 || ++timeo > 200)
1662 break;
1663
1664 mdelay(1);
1665 } while (true);
1666
1667 for_all_tx_queues(adapter, txo, i) {
1668 txq = &txo->q;
1669 if (atomic_read(&txq->used))
1670 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1671 atomic_read(&txq->used));
1672
1673 /* free posted tx for which compls will never arrive */
1674 while (atomic_read(&txq->used)) {
1675 sent_skb = txo->sent_skb_list[txq->tail];
1676 end_idx = txq->tail;
1677 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1678 &dummy_wrb);
1679 index_adv(&end_idx, num_wrbs - 1, txq->len);
1680 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1681 atomic_sub(num_wrbs, &txq->used);
1682 }
1683 }
1684 }
1685
1686 static void be_evt_queues_destroy(struct be_adapter *adapter)
1687 {
1688 struct be_eq_obj *eqo;
1689 int i;
1690
1691 for_all_evt_queues(adapter, eqo, i) {
1692 be_eq_clean(eqo);
1693 if (eqo->q.created)
1694 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1695 be_queue_free(adapter, &eqo->q);
1696 }
1697 }
1698
1699 static int be_evt_queues_create(struct be_adapter *adapter)
1700 {
1701 struct be_queue_info *eq;
1702 struct be_eq_obj *eqo;
1703 int i, rc;
1704
1705 adapter->num_evt_qs = num_irqs(adapter);
1706
1707 for_all_evt_queues(adapter, eqo, i) {
1708 eqo->adapter = adapter;
1709 eqo->tx_budget = BE_TX_BUDGET;
1710 eqo->idx = i;
1711 eqo->max_eqd = BE_MAX_EQD;
1712 eqo->enable_aic = true;
1713
1714 eq = &eqo->q;
1715 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1716 sizeof(struct be_eq_entry));
1717 if (rc)
1718 return rc;
1719
1720 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1721 if (rc)
1722 return rc;
1723 }
1724 return 0;
1725 }
1726
1727 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1728 {
1729 struct be_queue_info *q;
1730
1731 q = &adapter->mcc_obj.q;
1732 if (q->created)
1733 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1734 be_queue_free(adapter, q);
1735
1736 q = &adapter->mcc_obj.cq;
1737 if (q->created)
1738 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1739 be_queue_free(adapter, q);
1740 }
1741
1742 /* Must be called only after TX qs are created as MCC shares TX EQ */
1743 static int be_mcc_queues_create(struct be_adapter *adapter)
1744 {
1745 struct be_queue_info *q, *cq;
1746
1747 cq = &adapter->mcc_obj.cq;
1748 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1749 sizeof(struct be_mcc_compl)))
1750 goto err;
1751
1752 /* Use the default EQ for MCC completions */
1753 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1754 goto mcc_cq_free;
1755
1756 q = &adapter->mcc_obj.q;
1757 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1758 goto mcc_cq_destroy;
1759
1760 if (be_cmd_mccq_create(adapter, q, cq))
1761 goto mcc_q_free;
1762
1763 return 0;
1764
1765 mcc_q_free:
1766 be_queue_free(adapter, q);
1767 mcc_cq_destroy:
1768 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1769 mcc_cq_free:
1770 be_queue_free(adapter, cq);
1771 err:
1772 return -1;
1773 }
1774
1775 static void be_tx_queues_destroy(struct be_adapter *adapter)
1776 {
1777 struct be_queue_info *q;
1778 struct be_tx_obj *txo;
1779 u8 i;
1780
1781 for_all_tx_queues(adapter, txo, i) {
1782 q = &txo->q;
1783 if (q->created)
1784 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1785 be_queue_free(adapter, q);
1786
1787 q = &txo->cq;
1788 if (q->created)
1789 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1790 be_queue_free(adapter, q);
1791 }
1792 }
1793
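/* Multiple TX queues are used only on a BE3 PF that is not in SR-IOV or
 * multi-channel mode; Lancer, VFs and BE2 get a single TX queue.
 */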
1794 static int be_num_txqs_want(struct be_adapter *adapter)
1795 {
1796 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
1797 lancer_chip(adapter) || !be_physfn(adapter) ||
1798 adapter->generation == BE_GEN2)
1799 return 1;
1800 else
1801 return MAX_TX_QS;
1802 }
1803
1804 static int be_tx_cqs_create(struct be_adapter *adapter)
1805 {
1806 struct be_queue_info *cq, *eq;
1807 int status;
1808 struct be_tx_obj *txo;
1809 u8 i;
1810
1811 adapter->num_tx_qs = be_num_txqs_want(adapter);
1812 if (adapter->num_tx_qs != MAX_TX_QS) {
1813 rtnl_lock();
1814 netif_set_real_num_tx_queues(adapter->netdev,
1815 adapter->num_tx_qs);
1816 rtnl_unlock();
1817 }
1818
1819 for_all_tx_queues(adapter, txo, i) {
1820 cq = &txo->cq;
1821 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1822 sizeof(struct be_eth_tx_compl));
1823 if (status)
1824 return status;
1825
1826 /* If num_evt_qs is less than num_tx_qs, then more than
1827 * one txq share an eq
1828 */
1829 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1830 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1831 if (status)
1832 return status;
1833 }
1834 return 0;
1835 }
1836
1837 static int be_tx_qs_create(struct be_adapter *adapter)
1838 {
1839 struct be_tx_obj *txo;
1840 int i, status;
1841
1842 for_all_tx_queues(adapter, txo, i) {
1843 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1844 sizeof(struct be_eth_wrb));
1845 if (status)
1846 return status;
1847
1848 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1849 if (status)
1850 return status;
1851 }
1852
1853 return 0;
1854 }
1855
1856 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1857 {
1858 struct be_queue_info *q;
1859 struct be_rx_obj *rxo;
1860 int i;
1861
1862 for_all_rx_queues(adapter, rxo, i) {
1863 q = &rxo->cq;
1864 if (q->created)
1865 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1866 be_queue_free(adapter, q);
1867 }
1868 }
1869
1870 static int be_rx_cqs_create(struct be_adapter *adapter)
1871 {
1872 struct be_queue_info *eq, *cq;
1873 struct be_rx_obj *rxo;
1874 int rc, i;
1875
1876 /* We'll create as many RSS rings as there are irqs.
1877 * But when there's only one irq, there's no use creating RSS rings.
1878 */
1879 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1880 num_irqs(adapter) + 1 : 1;
1881
1882 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1883 for_all_rx_queues(adapter, rxo, i) {
1884 rxo->adapter = adapter;
1885 cq = &rxo->cq;
1886 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1887 sizeof(struct be_eth_rx_compl));
1888 if (rc)
1889 return rc;
1890
1891 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1892 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1893 if (rc)
1894 return rc;
1895 }
1896
1897 if (adapter->num_rx_qs != MAX_RX_QS)
1898 dev_info(&adapter->pdev->dev,
1899 "Created only %d receive queues", adapter->num_rx_qs);
1900
1901 return 0;
1902 }
1903
1904 static irqreturn_t be_intx(int irq, void *dev)
1905 {
1906 struct be_adapter *adapter = dev;
1907 int num_evts;
1908
1909 /* With INTx only one EQ is used */
1910 num_evts = event_handle(&adapter->eq_obj[0]);
1911 if (num_evts)
1912 return IRQ_HANDLED;
1913 else
1914 return IRQ_NONE;
1915 }
1916
1917 static irqreturn_t be_msix(int irq, void *dev)
1918 {
1919 struct be_eq_obj *eqo = dev;
1920
1921 event_handle(eqo);
1922 return IRQ_HANDLED;
1923 }
1924
1925 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1926 {
1927 return (rxcp->tcpf && !rxcp->err) ? true : false;
1928 }
1929
1930 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1931 int budget)
1932 {
1933 struct be_adapter *adapter = rxo->adapter;
1934 struct be_queue_info *rx_cq = &rxo->cq;
1935 struct be_rx_compl_info *rxcp;
1936 u32 work_done;
1937
1938 for (work_done = 0; work_done < budget; work_done++) {
1939 rxcp = be_rx_compl_get(rxo);
1940 if (!rxcp)
1941 break;
1942
1943 /* Is it a flush compl that has no data */
1944 if (unlikely(rxcp->num_rcvd == 0))
1945 goto loop_continue;
1946
1947 /* Discard compl with partial DMA Lancer B0 */
1948 if (unlikely(!rxcp->pkt_size)) {
1949 be_rx_compl_discard(rxo, rxcp);
1950 goto loop_continue;
1951 }
1952
1953 /* On BE drop pkts that arrive due to imperfect filtering in
1954 * promiscuous mode on some SKUs
1955 */
1956 if (unlikely(rxcp->port != adapter->port_num &&
1957 !lancer_chip(adapter))) {
1958 be_rx_compl_discard(rxo, rxcp);
1959 goto loop_continue;
1960 }
1961
1962 if (do_gro(rxcp))
1963 be_rx_compl_process_gro(rxo, napi, rxcp);
1964 else
1965 be_rx_compl_process(rxo, rxcp);
1966 loop_continue:
1967 be_rx_stats_update(rxo, rxcp);
1968 }
1969
1970 if (work_done) {
1971 be_cq_notify(adapter, rx_cq->id, true, work_done);
1972
1973 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1974 be_post_rx_frags(rxo, GFP_ATOMIC);
1975 }
1976
1977 return work_done;
1978 }
1979
1980 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
1981 int budget, int idx)
1982 {
1983 struct be_eth_tx_compl *txcp;
1984 int num_wrbs = 0, work_done;
1985
1986 for (work_done = 0; work_done < budget; work_done++) {
1987 txcp = be_tx_compl_get(&txo->cq);
1988 if (!txcp)
1989 break;
1990 num_wrbs += be_tx_compl_process(adapter, txo,
1991 AMAP_GET_BITS(struct amap_eth_tx_compl,
1992 wrb_index, txcp));
1993 }
1994
1995 if (work_done) {
1996 be_cq_notify(adapter, txo->cq.id, true, work_done);
1997 atomic_sub(num_wrbs, &txo->q.used);
1998
1999 /* As Tx wrbs have been freed up, wake up netdev queue
2000 * if it was stopped due to lack of tx wrbs. */
2001 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2002 atomic_read(&txo->q.used) < txo->q.len / 2) {
2003 netif_wake_subqueue(adapter->netdev, idx);
2004 }
2005
2006 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2007 tx_stats(txo)->tx_compl += work_done;
2008 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2009 }
2010 return (work_done < budget); /* Done */
2011 }
2012
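/* NAPI handler for one EQ: service every TX and RX queue mapped to this
 * EQ (queues are striped across the EQs), plus MCC completions on the EQ
 * that owns the MCC queue. The EQ is re-armed only when all the work fits
 * within the budget; otherwise events are counted and cleared and polling
 * continues.
 */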
2013 int be_poll(struct napi_struct *napi, int budget)
2014 {
2015 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2016 struct be_adapter *adapter = eqo->adapter;
2017 int max_work = 0, work, i;
2018 bool tx_done;
2019
2020 /* Process all TXQs serviced by this EQ */
2021 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2022 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2023 eqo->tx_budget, i);
2024 if (!tx_done)
2025 max_work = budget;
2026 }
2027
2028 /* This loop iterates twice for EQ0, in which completions of the
2029 * last RXQ (the default one) are also processed.
2030 * For other EQs the loop iterates only once.
2031 */
2032 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2033 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2034 max_work = max(work, max_work);
2035 }
2036
2037 if (is_mcc_eqo(eqo))
2038 be_process_mcc(adapter);
2039
2040 if (max_work < budget) {
2041 napi_complete(napi);
2042 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2043 } else {
2044 /* As we'll continue in polling mode, count and clear events */
2045 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2046 }
2047 return max_work;
2048 }
2049
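/* Detect an unrecoverable error (UE): SLIPORT status on Lancer, the
 * (masked) UE status CSRs on BE. On error, flag the adapter and dump
 * the blocks that reported the failure.
 */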
2050 void be_detect_dump_ue(struct be_adapter *adapter)
2051 {
2052 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2053 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2054 u32 i;
2055
2056 if (adapter->eeh_err || adapter->ue_detected)
2057 return;
2058
2059 if (lancer_chip(adapter)) {
2060 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2061 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2062 sliport_err1 = ioread32(adapter->db +
2063 SLIPORT_ERROR1_OFFSET);
2064 sliport_err2 = ioread32(adapter->db +
2065 SLIPORT_ERROR2_OFFSET);
2066 }
2067 } else {
2068 pci_read_config_dword(adapter->pdev,
2069 PCICFG_UE_STATUS_LOW, &ue_lo);
2070 pci_read_config_dword(adapter->pdev,
2071 PCICFG_UE_STATUS_HIGH, &ue_hi);
2072 pci_read_config_dword(adapter->pdev,
2073 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2074 pci_read_config_dword(adapter->pdev,
2075 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2076
2077 ue_lo = (ue_lo & (~ue_lo_mask));
2078 ue_hi = (ue_hi & (~ue_hi_mask));
2079 }
2080
2081 if (ue_lo || ue_hi ||
2082 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2083 adapter->ue_detected = true;
2084 adapter->eeh_err = true;
2085 dev_err(&adapter->pdev->dev,
2086 "Unrecoverable error in the card\n");
2087 }
2088
2089 if (ue_lo) {
2090 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2091 if (ue_lo & 1)
2092 dev_err(&adapter->pdev->dev,
2093 "UE: %s bit set\n", ue_status_low_desc[i]);
2094 }
2095 }
2096 if (ue_hi) {
2097 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2098 if (ue_hi & 1)
2099 dev_err(&adapter->pdev->dev,
2100 "UE: %s bit set\n", ue_status_hi_desc[i]);
2101 }
2102 }
2103
2104 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2105 dev_err(&adapter->pdev->dev,
2106 "sliport status 0x%x\n", sliport_status);
2107 dev_err(&adapter->pdev->dev,
2108 "sliport error1 0x%x\n", sliport_err1);
2109 dev_err(&adapter->pdev->dev,
2110 "sliport error2 0x%x\n", sliport_err2);
2111 }
2112 }
2113
2114 static void be_msix_disable(struct be_adapter *adapter)
2115 {
2116 if (msix_enabled(adapter)) {
2117 pci_disable_msix(adapter->pdev);
2118 adapter->num_msix_vec = 0;
2119 }
2120 }
2121
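/* RSS rings are used only on an RSS-capable PF that has no VFs and is
 * not in multi-channel mode.
 */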
2122 static uint be_num_rss_want(struct be_adapter *adapter)
2123 {
2124 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2125 adapter->num_vfs == 0 && be_physfn(adapter) &&
2126 !be_is_mc(adapter))
2127 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2128 else
2129 return 0;
2130 }
2131
2132 static void be_msix_enable(struct be_adapter *adapter)
2133 {
2134 #define BE_MIN_MSIX_VECTORS 1
2135 int i, status, num_vec;
2136
2137 /* If RSS queues are not used, need a vec for default RX Q */
2138 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2139 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2140
2141 for (i = 0; i < num_vec; i++)
2142 adapter->msix_entries[i].entry = i;
2143
2144 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2145 if (status == 0) {
2146 goto done;
2147 } else if (status >= BE_MIN_MSIX_VECTORS) {
2148 num_vec = status;
2149 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2150 num_vec) == 0)
2151 goto done;
2152 }
2153 return;
2154 done:
2155 adapter->num_msix_vec = num_vec;
2156 return;
2157 }
2158
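/* Enable SR-IOV on the PF: clamp the num_vfs module parameter to the
 * TotalVFs value reported in the device's SR-IOV capability, enable the
 * VFs and allocate per-VF configuration state.
 */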
2159 static int be_sriov_enable(struct be_adapter *adapter)
2160 {
2161 be_check_sriov_fn_type(adapter);
2162
2163 #ifdef CONFIG_PCI_IOV
2164 if (be_physfn(adapter) && num_vfs) {
2165 int status, pos;
2166 u16 dev_vfs;
2167
2168 pos = pci_find_ext_capability(adapter->pdev,
2169 PCI_EXT_CAP_ID_SRIOV);
2170 pci_read_config_word(adapter->pdev,
2171 pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
2172
2173 adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
2174 if (adapter->num_vfs != num_vfs)
2175 dev_info(&adapter->pdev->dev,
2176 "Device supports %d VFs and not %d\n",
2177 adapter->num_vfs, num_vfs);
2178
2179 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2180 if (status)
2181 adapter->num_vfs = 0;
2182
2183 if (adapter->num_vfs) {
2184 adapter->vf_cfg = kcalloc(num_vfs,
2185 sizeof(struct be_vf_cfg),
2186 GFP_KERNEL);
2187 if (!adapter->vf_cfg)
2188 return -ENOMEM;
2189 }
2190 }
2191 #endif
2192 return 0;
2193 }
2194
2195 static void be_sriov_disable(struct be_adapter *adapter)
2196 {
2197 #ifdef CONFIG_PCI_IOV
2198 if (sriov_enabled(adapter)) {
2199 pci_disable_sriov(adapter->pdev);
2200 kfree(adapter->vf_cfg);
2201 adapter->num_vfs = 0;
2202 }
2203 #endif
2204 }
2205
2206 static inline int be_msix_vec_get(struct be_adapter *adapter,
2207 struct be_eq_obj *eqo)
2208 {
2209 return adapter->msix_entries[eqo->idx].vector;
2210 }
2211
2212 static int be_msix_register(struct be_adapter *adapter)
2213 {
2214 struct net_device *netdev = adapter->netdev;
2215 struct be_eq_obj *eqo;
2216 int status, i, vec;
2217
2218 for_all_evt_queues(adapter, eqo, i) {
2219 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2220 vec = be_msix_vec_get(adapter, eqo);
2221 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2222 if (status)
2223 goto err_msix;
2224 }
2225
2226 return 0;
2227 err_msix:
2228 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2229 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2230 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2231 status);
2232 be_msix_disable(adapter);
2233 return status;
2234 }
2235
2236 static int be_irq_register(struct be_adapter *adapter)
2237 {
2238 struct net_device *netdev = adapter->netdev;
2239 int status;
2240
2241 if (msix_enabled(adapter)) {
2242 status = be_msix_register(adapter);
2243 if (status == 0)
2244 goto done;
2245 /* INTx is not supported for VF */
2246 if (!be_physfn(adapter))
2247 return status;
2248 }
2249
2250 /* INTx */
2251 netdev->irq = adapter->pdev->irq;
2252 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2253 adapter);
2254 if (status) {
2255 dev_err(&adapter->pdev->dev,
2256 "INTx request IRQ failed - err %d\n", status);
2257 return status;
2258 }
2259 done:
2260 adapter->isr_registered = true;
2261 return 0;
2262 }
2263
2264 static void be_irq_unregister(struct be_adapter *adapter)
2265 {
2266 struct net_device *netdev = adapter->netdev;
2267 struct be_eq_obj *eqo;
2268 int i;
2269
2270 if (!adapter->isr_registered)
2271 return;
2272
2273 /* INTx */
2274 if (!msix_enabled(adapter)) {
2275 free_irq(netdev->irq, adapter);
2276 goto done;
2277 }
2278
2279 /* MSIx */
2280 for_all_evt_queues(adapter, eqo, i)
2281 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2282
2283 done:
2284 adapter->isr_registered = false;
2285 }
2286
2287 static void be_rx_qs_destroy(struct be_adapter *adapter)
2288 {
2289 struct be_queue_info *q;
2290 struct be_rx_obj *rxo;
2291 int i;
2292
2293 for_all_rx_queues(adapter, rxo, i) {
2294 q = &rxo->q;
2295 if (q->created) {
2296 be_cmd_rxq_destroy(adapter, q);
2297 /* After the rxq is invalidated, wait for a grace time
2298 * of 1ms for all dma to end and the flush compl to
2299 * arrive
2300 */
2301 mdelay(1);
2302 be_rx_cq_clean(rxo);
2303 }
2304 be_queue_free(adapter, q);
2305 }
2306 }
2307
2308 static int be_close(struct net_device *netdev)
2309 {
2310 struct be_adapter *adapter = netdev_priv(netdev);
2311 struct be_eq_obj *eqo;
2312 int i;
2313
2314 be_async_mcc_disable(adapter);
2315
2316 if (!lancer_chip(adapter))
2317 be_intr_set(adapter, false);
2318
2319 for_all_evt_queues(adapter, eqo, i) {
2320 napi_disable(&eqo->napi);
2321 if (msix_enabled(adapter))
2322 synchronize_irq(be_msix_vec_get(adapter, eqo));
2323 else
2324 synchronize_irq(netdev->irq);
2325 be_eq_clean(eqo);
2326 }
2327
2328 be_irq_unregister(adapter);
2329
2330 /* Wait for all pending tx completions to arrive so that
2331 * all tx skbs are freed.
2332 */
2333 be_tx_compl_clean(adapter);
2334
2335 be_rx_qs_destroy(adapter);
2336 return 0;
2337 }
2338
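/* Create the RX rings (the default ring first, as the FW expects),
 * program the 128-entry RSS indirection table when multiple rings are
 * in use, and post the initial receive buffers.
 */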
2339 static int be_rx_qs_create(struct be_adapter *adapter)
2340 {
2341 struct be_rx_obj *rxo;
2342 int rc, i, j;
2343 u8 rsstable[128];
2344
2345 for_all_rx_queues(adapter, rxo, i) {
2346 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2347 sizeof(struct be_eth_rx_d));
2348 if (rc)
2349 return rc;
2350 }
2351
2352 /* The FW would like the default RXQ to be created first */
2353 rxo = default_rxo(adapter);
2354 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2355 adapter->if_handle, false, &rxo->rss_id);
2356 if (rc)
2357 return rc;
2358
2359 for_all_rss_queues(adapter, rxo, i) {
2360 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2361 rx_frag_size, adapter->if_handle,
2362 true, &rxo->rss_id);
2363 if (rc)
2364 return rc;
2365 }
2366
2367 if (be_multi_rxq(adapter)) {
2368 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2369 for_all_rss_queues(adapter, rxo, i) {
2370 if ((j + i) >= 128)
2371 break;
2372 rsstable[j + i] = rxo->rss_id;
2373 }
2374 }
2375 rc = be_cmd_rss_config(adapter, rsstable, 128);
2376 if (rc)
2377 return rc;
2378 }
2379
2380 /* First time posting */
2381 for_all_rx_queues(adapter, rxo, i)
2382 be_post_rx_frags(rxo, GFP_KERNEL);
2383 return 0;
2384 }
2385
2386 static int be_open(struct net_device *netdev)
2387 {
2388 struct be_adapter *adapter = netdev_priv(netdev);
2389 struct be_eq_obj *eqo;
2390 struct be_rx_obj *rxo;
2391 struct be_tx_obj *txo;
2392 u8 link_status;
2393 int status, i;
2394
2395 status = be_rx_qs_create(adapter);
2396 if (status)
2397 goto err;
2398
2399 be_irq_register(adapter);
2400
2401 if (!lancer_chip(adapter))
2402 be_intr_set(adapter, true);
2403
2404 for_all_rx_queues(adapter, rxo, i)
2405 be_cq_notify(adapter, rxo->cq.id, true, 0);
2406
2407 for_all_tx_queues(adapter, txo, i)
2408 be_cq_notify(adapter, txo->cq.id, true, 0);
2409
2410 be_async_mcc_enable(adapter);
2411
2412 for_all_evt_queues(adapter, eqo, i) {
2413 napi_enable(&eqo->napi);
2414 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2415 }
2416
2417 status = be_cmd_link_status_query(adapter, NULL, NULL,
2418 &link_status, 0);
2419 if (!status)
2420 be_link_status_update(adapter, link_status);
2421
2422 return 0;
2423 err:
2424 be_close(adapter->netdev);
2425 return -EIO;
2426 }
2427
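/* Enable or disable Wake-on-LAN: program (or clear) the magic-packet
 * filter in the FW and set the PCI wake state for D3hot/D3cold to match.
 */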
2428 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2429 {
2430 struct be_dma_mem cmd;
2431 int status = 0;
2432 u8 mac[ETH_ALEN];
2433
2434 memset(mac, 0, ETH_ALEN);
2435
2436 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2437 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2438 GFP_KERNEL);
2439 if (cmd.va == NULL)
2440 return -1;
2441 memset(cmd.va, 0, cmd.size);
2442
2443 if (enable) {
2444 status = pci_write_config_dword(adapter->pdev,
2445 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2446 if (status) {
2447 dev_err(&adapter->pdev->dev,
2448 "Could not enable Wake-on-lan\n");
2449 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2450 cmd.dma);
2451 return status;
2452 }
2453 status = be_cmd_enable_magic_wol(adapter,
2454 adapter->netdev->dev_addr, &cmd);
2455 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2456 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2457 } else {
2458 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2459 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2460 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2461 }
2462
2463 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2464 return status;
2465 }
2466
2467 /*
2468 * Generate a seed MAC address from the PF MAC Address using jhash.
2469 * MAC addresses for VFs are assigned incrementally starting from the seed.
2470 * These addresses are programmed in the ASIC by the PF, and the VF driver
2471 * queries for its MAC address during probe.
2472 */
2473 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2474 {
2475 u32 vf;
2476 int status = 0;
2477 u8 mac[ETH_ALEN];
2478 struct be_vf_cfg *vf_cfg;
2479
2480 be_vf_eth_addr_generate(adapter, mac);
2481
2482 for_all_vfs(adapter, vf_cfg, vf) {
2483 if (lancer_chip(adapter)) {
2484 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2485 } else {
2486 status = be_cmd_pmac_add(adapter, mac,
2487 vf_cfg->if_handle,
2488 &vf_cfg->pmac_id, vf + 1);
2489 }
2490
2491 if (status)
2492 dev_err(&adapter->pdev->dev,
2493 "Mac address assignment failed for VF %d\n", vf);
2494 else
2495 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2496
2497 mac[5] += 1;
2498 }
2499 return status;
2500 }
2501
2502 static void be_vf_clear(struct be_adapter *adapter)
2503 {
2504 struct be_vf_cfg *vf_cfg;
2505 u32 vf;
2506
2507 for_all_vfs(adapter, vf_cfg, vf) {
2508 if (lancer_chip(adapter))
2509 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2510 else
2511 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2512 vf_cfg->pmac_id, vf + 1);
2513
2514 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2515 }
2516 }
2517
2518 static int be_clear(struct be_adapter *adapter)
2519 {
2520 int i = 1;
2521
2522 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2523 cancel_delayed_work_sync(&adapter->work);
2524 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2525 }
2526
2527 if (sriov_enabled(adapter))
2528 be_vf_clear(adapter);
2529
2530 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2531 be_cmd_pmac_del(adapter, adapter->if_handle,
2532 adapter->pmac_id[i], 0);
2533
2534 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2535
2536 be_mcc_queues_destroy(adapter);
2537 be_rx_cqs_destroy(adapter);
2538 be_tx_queues_destroy(adapter);
2539 be_evt_queues_destroy(adapter);
2540
2541 /* tell fw we're done with firing cmds */
2542 be_cmd_fw_clean(adapter);
2543
2544 be_msix_disable(adapter);
2545 kfree(adapter->pmac_id);
2546 return 0;
2547 }
2548
2549 static void be_vf_setup_init(struct be_adapter *adapter)
2550 {
2551 struct be_vf_cfg *vf_cfg;
2552 int vf;
2553
2554 for_all_vfs(adapter, vf_cfg, vf) {
2555 vf_cfg->if_handle = -1;
2556 vf_cfg->pmac_id = -1;
2557 }
2558 }
2559
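/* Per-VF setup: create each VF's interface (untagged, broadcast and
 * multicast traffic only), assign MAC addresses, and record the link
 * speed and default VLAN that the FW reports for each VF.
 */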
2560 static int be_vf_setup(struct be_adapter *adapter)
2561 {
2562 struct be_vf_cfg *vf_cfg;
2563 u32 cap_flags, en_flags, vf;
2564 u16 def_vlan, lnk_speed;
2565 int status;
2566
2567 be_vf_setup_init(adapter);
2568
2569 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2570 BE_IF_FLAGS_MULTICAST;
2571 for_all_vfs(adapter, vf_cfg, vf) {
2572 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2573 &vf_cfg->if_handle, NULL, vf + 1);
2574 if (status)
2575 goto err;
2576 }
2577
2578 status = be_vf_eth_addr_config(adapter);
2579 if (status)
2580 goto err;
2581
2582 for_all_vfs(adapter, vf_cfg, vf) {
2583 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2584 NULL, vf + 1);
2585 if (status)
2586 goto err;
2587 vf_cfg->tx_rate = lnk_speed * 10;
2588
2589 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2590 vf + 1, vf_cfg->if_handle);
2591 if (status)
2592 goto err;
2593 vf_cfg->def_vid = def_vlan;
2594 }
2595 return 0;
2596 err:
2597 return status;
2598 }
2599
2600 static void be_setup_init(struct be_adapter *adapter)
2601 {
2602 adapter->vlan_prio_bmap = 0xff;
2603 adapter->link_speed = -1;
2604 adapter->if_handle = -1;
2605 adapter->be3_native = false;
2606 adapter->promiscuous = false;
2607 adapter->eq_next_idx = 0;
2608 }
2609
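/* Called for Lancer VFs: fetch the MAC provisioned in the FW mac-list.
 * If the FW has already programmed it (pmac_id is active), just record
 * the pmac_id; otherwise add the MAC ourselves.
 */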
2610 static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
2611 {
2612 u32 pmac_id;
2613 int status;
2614 bool pmac_id_active;
2615
2616 status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2617 &pmac_id, mac);
2618 if (status != 0)
2619 goto do_none;
2620
2621 if (pmac_id_active) {
2622 status = be_cmd_mac_addr_query(adapter, mac,
2623 MAC_ADDRESS_TYPE_NETWORK,
2624 false, adapter->if_handle, pmac_id);
2625
2626 if (!status)
2627 adapter->pmac_id[0] = pmac_id;
2628 } else {
2629 status = be_cmd_pmac_add(adapter, mac,
2630 adapter->if_handle, &adapter->pmac_id[0], 0);
2631 }
2632 do_none:
2633 return status;
2634 }
2635
2636 static int be_setup(struct be_adapter *adapter)
2637 {
2638 struct net_device *netdev = adapter->netdev;
2639 u32 cap_flags, en_flags;
2640 u32 tx_fc, rx_fc;
2641 int status;
2642 u8 mac[ETH_ALEN];
2643
2644 be_setup_init(adapter);
2645
2646 be_cmd_req_native_mode(adapter);
2647
2648 be_msix_enable(adapter);
2649
2650 status = be_evt_queues_create(adapter);
2651 if (status)
2652 goto err;
2653
2654 status = be_tx_cqs_create(adapter);
2655 if (status)
2656 goto err;
2657
2658 status = be_rx_cqs_create(adapter);
2659 if (status)
2660 goto err;
2661
2662 status = be_mcc_queues_create(adapter);
2663 if (status)
2664 goto err;
2665
2666 memset(mac, 0, ETH_ALEN);
2667 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2668 true /*permanent */, 0, 0);
2669 if (status)
2670 return status;
2671 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2672 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2673
2674 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2675 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2676 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2677 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2678
2679 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2680 cap_flags |= BE_IF_FLAGS_RSS;
2681 en_flags |= BE_IF_FLAGS_RSS;
2682 }
2683 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2684 netdev->dev_addr, &adapter->if_handle,
2685 &adapter->pmac_id[0], 0);
2686 if (status != 0)
2687 goto err;
2688
2689 /* The VF's permanent MAC queried from the card is incorrect.
2690 * For BEx: query the MAC configured by the PF using the if_handle.
2691 * For Lancer: get and use the mac_list to obtain the MAC address.
2692 */
2693 if (!be_physfn(adapter)) {
2694 if (lancer_chip(adapter))
2695 status = be_add_mac_from_list(adapter, mac);
2696 else
2697 status = be_cmd_mac_addr_query(adapter, mac,
2698 MAC_ADDRESS_TYPE_NETWORK, false,
2699 adapter->if_handle, 0);
2700 if (!status) {
2701 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2702 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2703 }
2704 }
2705
2706 status = be_tx_qs_create(adapter);
2707 if (status)
2708 goto err;
2709
2710 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2711
2712 status = be_vid_config(adapter, false, 0);
2713 if (status)
2714 goto err;
2715
2716 be_set_rx_mode(adapter->netdev);
2717
2718 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2719 /* For Lancer: It is legal for this cmd to fail on VF */
2720 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2721 goto err;
2722
2723 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2724 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2725 adapter->rx_fc);
2726 /* For Lancer: It is legal for this cmd to fail on VF */
2727 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2728 goto err;
2729 }
2730
2731 pcie_set_readrq(adapter->pdev, 4096);
2732
2733 if (sriov_enabled(adapter)) {
2734 status = be_vf_setup(adapter);
2735 if (status)
2736 goto err;
2737 }
2738
2739 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2740 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2741
2742 return 0;
2743 err:
2744 be_clear(adapter);
2745 return status;
2746 }
2747
2748 #ifdef CONFIG_NET_POLL_CONTROLLER
2749 static void be_netpoll(struct net_device *netdev)
2750 {
2751 struct be_adapter *adapter = netdev_priv(netdev);
2752 struct be_eq_obj *eqo;
2753 int i;
2754
2755 for_all_evt_queues(adapter, eqo, i)
2756 event_handle(eqo);
2757
2758 return;
2759 }
2760 #endif
2761
2762 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
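/* Decide whether the redboot (boot code) region needs flashing: compare
 * the CRC at the end of the image in the file with the CRC already in
 * flash, and flash only if they differ.
 */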
2763 static bool be_flash_redboot(struct be_adapter *adapter,
2764 const u8 *p, u32 img_start, int image_size,
2765 int hdr_size)
2766 {
2767 u32 crc_offset;
2768 u8 flashed_crc[4];
2769 int status;
2770
2771 crc_offset = hdr_size + img_start + image_size - 4;
2772
2773 p += crc_offset;
2774
2775 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2776 (image_size - 4));
2777 if (status) {
2778 dev_err(&adapter->pdev->dev,
2779 "could not get crc from flash, not flashing redboot\n");
2780 return false;
2781 }
2782
2783 /* update redboot only if crc does not match */
2784 if (!memcmp(flashed_crc, p, 4))
2785 return false;
2786 else
2787 return true;
2788 }
2789
2790 static bool phy_flashing_required(struct be_adapter *adapter)
2791 {
2792 int status = 0;
2793 struct be_phy_info phy_info;
2794
2795 status = be_cmd_get_phy_info(adapter, &phy_info);
2796 if (status)
2797 return false;
2798 if ((phy_info.phy_type == TN_8022) &&
2799 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2800 return true;
2801 }
2802 return false;
2803 }
2804
2805 static int be_flash_data(struct be_adapter *adapter,
2806 const struct firmware *fw,
2807 struct be_dma_mem *flash_cmd, int num_of_images)
2808
2809 {
2810 int status = 0, i, filehdr_size = 0;
2811 u32 total_bytes = 0, flash_op;
2812 int num_bytes;
2813 const u8 *p = fw->data;
2814 struct be_cmd_write_flashrom *req = flash_cmd->va;
2815 const struct flash_comp *pflashcomp;
2816 int num_comp;
2817
2818 static const struct flash_comp gen3_flash_types[10] = {
2819 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2820 FLASH_IMAGE_MAX_SIZE_g3},
2821 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2822 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2823 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2824 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2825 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2826 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2827 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2828 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2829 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2830 FLASH_IMAGE_MAX_SIZE_g3},
2831 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2832 FLASH_IMAGE_MAX_SIZE_g3},
2833 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2834 FLASH_IMAGE_MAX_SIZE_g3},
2835 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2836 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2837 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2838 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2839 };
2840 static const struct flash_comp gen2_flash_types[8] = {
2841 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2842 FLASH_IMAGE_MAX_SIZE_g2},
2843 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2844 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2845 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2846 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2847 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2848 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2849 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2850 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2851 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2852 FLASH_IMAGE_MAX_SIZE_g2},
2853 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2854 FLASH_IMAGE_MAX_SIZE_g2},
2855 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2856 FLASH_IMAGE_MAX_SIZE_g2}
2857 };
2858
2859 if (adapter->generation == BE_GEN3) {
2860 pflashcomp = gen3_flash_types;
2861 filehdr_size = sizeof(struct flash_file_hdr_g3);
2862 num_comp = ARRAY_SIZE(gen3_flash_types);
2863 } else {
2864 pflashcomp = gen2_flash_types;
2865 filehdr_size = sizeof(struct flash_file_hdr_g2);
2866 num_comp = ARRAY_SIZE(gen2_flash_types);
2867 }
2868 for (i = 0; i < num_comp; i++) {
2869 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2870 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2871 continue;
2872 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2873 if (!phy_flashing_required(adapter))
2874 continue;
2875 }
2876 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2877 (!be_flash_redboot(adapter, fw->data,
2878 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2879 (num_of_images * sizeof(struct image_hdr)))))
2880 continue;
2881 p = fw->data;
2882 p += filehdr_size + pflashcomp[i].offset
2883 + (num_of_images * sizeof(struct image_hdr));
2884 if (p + pflashcomp[i].size > fw->data + fw->size)
2885 return -1;
2886 total_bytes = pflashcomp[i].size;
2887 while (total_bytes) {
2888 if (total_bytes > 32*1024)
2889 num_bytes = 32*1024;
2890 else
2891 num_bytes = total_bytes;
2892 total_bytes -= num_bytes;
2893 if (!total_bytes) {
2894 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2895 flash_op = FLASHROM_OPER_PHY_FLASH;
2896 else
2897 flash_op = FLASHROM_OPER_FLASH;
2898 } else {
2899 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2900 flash_op = FLASHROM_OPER_PHY_SAVE;
2901 else
2902 flash_op = FLASHROM_OPER_SAVE;
2903 }
2904 memcpy(req->params.data_buf, p, num_bytes);
2905 p += num_bytes;
2906 status = be_cmd_write_flashrom(adapter, flash_cmd,
2907 pflashcomp[i].optype, flash_op, num_bytes);
2908 if (status) {
2909 if ((status == ILLEGAL_IOCTL_REQ) &&
2910 (pflashcomp[i].optype ==
2911 IMG_TYPE_PHY_FW))
2912 break;
2913 dev_err(&adapter->pdev->dev,
2914 "cmd to write to flash rom failed.\n");
2915 return -1;
2916 }
2917 }
2918 }
2919 return 0;
2920 }
2921
2922 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2923 {
2924 if (fhdr == NULL)
2925 return 0;
2926 if (fhdr->build[0] == '3')
2927 return BE_GEN3;
2928 else if (fhdr->build[0] == '2')
2929 return BE_GEN2;
2930 else
2931 return 0;
2932 }
2933
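/* Lancer FW download: the image (which must be 4-byte aligned) is
 * streamed to the "/prg" object in 32KB chunks via write-object cmds;
 * a final zero-length write commits the image.
 */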
2934 static int lancer_fw_download(struct be_adapter *adapter,
2935 const struct firmware *fw)
2936 {
2937 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2938 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2939 struct be_dma_mem flash_cmd;
2940 const u8 *data_ptr = NULL;
2941 u8 *dest_image_ptr = NULL;
2942 size_t image_size = 0;
2943 u32 chunk_size = 0;
2944 u32 data_written = 0;
2945 u32 offset = 0;
2946 int status = 0;
2947 u8 add_status = 0;
2948
2949 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2950 dev_err(&adapter->pdev->dev,
2951 "FW Image not properly aligned. "
2952 "Length must be 4 byte aligned.\n");
2953 status = -EINVAL;
2954 goto lancer_fw_exit;
2955 }
2956
2957 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2958 + LANCER_FW_DOWNLOAD_CHUNK;
2959 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2960 &flash_cmd.dma, GFP_KERNEL);
2961 if (!flash_cmd.va) {
2962 status = -ENOMEM;
2963 dev_err(&adapter->pdev->dev,
2964 "Memory allocation failure while flashing\n");
2965 goto lancer_fw_exit;
2966 }
2967
2968 dest_image_ptr = flash_cmd.va +
2969 sizeof(struct lancer_cmd_req_write_object);
2970 image_size = fw->size;
2971 data_ptr = fw->data;
2972
2973 while (image_size) {
2974 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2975
2976 /* Copy the image chunk content. */
2977 memcpy(dest_image_ptr, data_ptr, chunk_size);
2978
2979 status = lancer_cmd_write_object(adapter, &flash_cmd,
2980 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2981 &data_written, &add_status);
2982
2983 if (status)
2984 break;
2985
2986 offset += data_written;
2987 data_ptr += data_written;
2988 image_size -= data_written;
2989 }
2990
2991 if (!status) {
2992 /* Commit the FW written */
2993 status = lancer_cmd_write_object(adapter, &flash_cmd,
2994 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2995 &data_written, &add_status);
2996 }
2997
2998 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2999 flash_cmd.dma);
3000 if (status) {
3001 dev_err(&adapter->pdev->dev,
3002 "Firmware load error. "
3003 "Status code: 0x%x Additional Status: 0x%x\n",
3004 status, add_status);
3005 goto lancer_fw_exit;
3006 }
3007
3008 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3009 lancer_fw_exit:
3010 return status;
3011 }
3012
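/* BE2/BE3 UFI download: the UFI generation in the file header must match
 * the adapter generation; each flash component is then written region by
 * region via be_flash_data().
 */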
3013 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3014 {
3015 struct flash_file_hdr_g2 *fhdr;
3016 struct flash_file_hdr_g3 *fhdr3;
3017 struct image_hdr *img_hdr_ptr = NULL;
3018 struct be_dma_mem flash_cmd;
3019 const u8 *p;
3020 int status = 0, i = 0, num_imgs = 0;
3021
3022 p = fw->data;
3023 fhdr = (struct flash_file_hdr_g2 *) p;
3024
3025 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3026 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3027 &flash_cmd.dma, GFP_KERNEL);
3028 if (!flash_cmd.va) {
3029 status = -ENOMEM;
3030 dev_err(&adapter->pdev->dev,
3031 "Memory allocation failure while flashing\n");
3032 goto be_fw_exit;
3033 }
3034
3035 if ((adapter->generation == BE_GEN3) &&
3036 (get_ufigen_type(fhdr) == BE_GEN3)) {
3037 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3038 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3039 for (i = 0; i < num_imgs; i++) {
3040 img_hdr_ptr = (struct image_hdr *) (fw->data +
3041 (sizeof(struct flash_file_hdr_g3) +
3042 i * sizeof(struct image_hdr)));
3043 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3044 status = be_flash_data(adapter, fw, &flash_cmd,
3045 num_imgs);
3046 }
3047 } else if ((adapter->generation == BE_GEN2) &&
3048 (get_ufigen_type(fhdr) == BE_GEN2)) {
3049 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3050 } else {
3051 dev_err(&adapter->pdev->dev,
3052 "UFI and Interface are not compatible for flashing\n");
3053 status = -1;
3054 }
3055
3056 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3057 flash_cmd.dma);
3058 if (status) {
3059 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3060 goto be_fw_exit;
3061 }
3062
3063 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3064
3065 be_fw_exit:
3066 return status;
3067 }
3068
3069 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3070 {
3071 const struct firmware *fw;
3072 int status;
3073
3074 if (!netif_running(adapter->netdev)) {
3075 dev_err(&adapter->pdev->dev,
3076 "Firmware load not allowed (interface is down)\n");
3077 return -1;
3078 }
3079
3080 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3081 if (status)
3082 goto fw_exit;
3083
3084 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3085
3086 if (lancer_chip(adapter))
3087 status = lancer_fw_download(adapter, fw);
3088 else
3089 status = be_fw_download(adapter, fw);
3090
3091 fw_exit:
3092 release_firmware(fw);
3093 return status;
3094 }
3095
3096 static const struct net_device_ops be_netdev_ops = {
3097 .ndo_open = be_open,
3098 .ndo_stop = be_close,
3099 .ndo_start_xmit = be_xmit,
3100 .ndo_set_rx_mode = be_set_rx_mode,
3101 .ndo_set_mac_address = be_mac_addr_set,
3102 .ndo_change_mtu = be_change_mtu,
3103 .ndo_get_stats64 = be_get_stats64,
3104 .ndo_validate_addr = eth_validate_addr,
3105 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3106 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3107 .ndo_set_vf_mac = be_set_vf_mac,
3108 .ndo_set_vf_vlan = be_set_vf_vlan,
3109 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3110 .ndo_get_vf_config = be_get_vf_config,
3111 #ifdef CONFIG_NET_POLL_CONTROLLER
3112 .ndo_poll_controller = be_netpoll,
3113 #endif
3114 };
3115
3116 static void be_netdev_init(struct net_device *netdev)
3117 {
3118 struct be_adapter *adapter = netdev_priv(netdev);
3119 struct be_eq_obj *eqo;
3120 int i;
3121
3122 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3123 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3124 NETIF_F_HW_VLAN_TX;
3125 if (be_multi_rxq(adapter))
3126 netdev->hw_features |= NETIF_F_RXHASH;
3127
3128 netdev->features |= netdev->hw_features |
3129 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3130
3131 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3132 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3133
3134 netdev->priv_flags |= IFF_UNICAST_FLT;
3135
3136 netdev->flags |= IFF_MULTICAST;
3137
3138 netif_set_gso_max_size(netdev, 65535);
3139
3140 netdev->netdev_ops = &be_netdev_ops;
3141
3142 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3143
3144 for_all_evt_queues(adapter, eqo, i)
3145 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3146 }
3147
3148 static void be_unmap_pci_bars(struct be_adapter *adapter)
3149 {
3150 if (adapter->csr)
3151 iounmap(adapter->csr);
3152 if (adapter->db)
3153 iounmap(adapter->db);
3154 }
3155
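/* Map the PCI BARs: Lancer exposes only a doorbell BAR (BAR 0). On
 * BE2/BE3 the PF also maps the CSR BAR (BAR 2); the doorbell is BAR 4,
 * except on a GEN3 VF where it is BAR 0.
 */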
3156 static int be_map_pci_bars(struct be_adapter *adapter)
3157 {
3158 u8 __iomem *addr;
3159 int db_reg;
3160
3161 if (lancer_chip(adapter)) {
3162 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3163 pci_resource_len(adapter->pdev, 0));
3164 if (addr == NULL)
3165 return -ENOMEM;
3166 adapter->db = addr;
3167 return 0;
3168 }
3169
3170 if (be_physfn(adapter)) {
3171 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3172 pci_resource_len(adapter->pdev, 2));
3173 if (addr == NULL)
3174 return -ENOMEM;
3175 adapter->csr = addr;
3176 }
3177
3178 if (adapter->generation == BE_GEN2) {
3179 db_reg = 4;
3180 } else {
3181 if (be_physfn(adapter))
3182 db_reg = 4;
3183 else
3184 db_reg = 0;
3185 }
3186 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3187 pci_resource_len(adapter->pdev, db_reg));
3188 if (addr == NULL)
3189 goto pci_map_err;
3190 adapter->db = addr;
3191
3192 return 0;
3193 pci_map_err:
3194 be_unmap_pci_bars(adapter);
3195 return -ENOMEM;
3196 }
3197
3198
3199 static void be_ctrl_cleanup(struct be_adapter *adapter)
3200 {
3201 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3202
3203 be_unmap_pci_bars(adapter);
3204
3205 if (mem->va)
3206 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3207 mem->dma);
3208
3209 mem = &adapter->rx_filter;
3210 if (mem->va)
3211 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3212 mem->dma);
3213 }
3214
3215 static int be_ctrl_init(struct be_adapter *adapter)
3216 {
3217 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3218 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3219 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3220 int status;
3221
3222 status = be_map_pci_bars(adapter);
3223 if (status)
3224 goto done;
3225
3226 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3227 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3228 mbox_mem_alloc->size,
3229 &mbox_mem_alloc->dma,
3230 GFP_KERNEL);
3231 if (!mbox_mem_alloc->va) {
3232 status = -ENOMEM;
3233 goto unmap_pci_bars;
3234 }
3235 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3236 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3237 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3238 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3239
3240 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3241 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3242 &rx_filter->dma, GFP_KERNEL);
3243 if (rx_filter->va == NULL) {
3244 status = -ENOMEM;
3245 goto free_mbox;
3246 }
3247 memset(rx_filter->va, 0, rx_filter->size);
3248
3249 mutex_init(&adapter->mbox_lock);
3250 spin_lock_init(&adapter->mcc_lock);
3251 spin_lock_init(&adapter->mcc_cq_lock);
3252
3253 init_completion(&adapter->flash_compl);
3254 pci_save_state(adapter->pdev);
3255 return 0;
3256
3257 free_mbox:
3258 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3259 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3260
3261 unmap_pci_bars:
3262 be_unmap_pci_bars(adapter);
3263
3264 done:
3265 return status;
3266 }
3267
3268 static void be_stats_cleanup(struct be_adapter *adapter)
3269 {
3270 struct be_dma_mem *cmd = &adapter->stats_cmd;
3271
3272 if (cmd->va)
3273 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3274 cmd->va, cmd->dma);
3275 }
3276
3277 static int be_stats_init(struct be_adapter *adapter)
3278 {
3279 struct be_dma_mem *cmd = &adapter->stats_cmd;
3280
3281 if (adapter->generation == BE_GEN2) {
3282 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3283 } else {
3284 if (lancer_chip(adapter))
3285 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3286 else
3287 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3288 }
3289 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3290 GFP_KERNEL);
3291 if (cmd->va == NULL)
3292 return -1;
3293 memset(cmd->va, 0, cmd->size);
3294 return 0;
3295 }
3296
3297 static void __devexit be_remove(struct pci_dev *pdev)
3298 {
3299 struct be_adapter *adapter = pci_get_drvdata(pdev);
3300
3301 if (!adapter)
3302 return;
3303
3304 unregister_netdev(adapter->netdev);
3305
3306 be_clear(adapter);
3307
3308 be_stats_cleanup(adapter);
3309
3310 be_ctrl_cleanup(adapter);
3311
3312 be_sriov_disable(adapter);
3313
3314 pci_set_drvdata(pdev, NULL);
3315 pci_release_regions(pdev);
3316 pci_disable_device(pdev);
3317
3318 free_netdev(adapter->netdev);
3319 }
3320
3321 bool be_is_wol_supported(struct be_adapter *adapter)
3322 {
3323 return ((adapter->wol_cap & BE_WOL_CAP) &&
3324 !be_is_wol_excluded(adapter)) ? true : false;
3325 }
3326
3327 static int be_get_config(struct be_adapter *adapter)
3328 {
3329 int status;
3330
3331 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3332 &adapter->function_mode, &adapter->function_caps);
3333 if (status)
3334 return status;
3335
3336 if (adapter->function_mode & FLEX10_MODE)
3337 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3338 else
3339 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3340
3341 if (be_physfn(adapter))
3342 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3343 else
3344 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3345
3346 /* primary mac needs 1 pmac entry */
3347 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3348 sizeof(u32), GFP_KERNEL);
3349 if (!adapter->pmac_id)
3350 return -ENOMEM;
3351
3352 status = be_cmd_get_cntl_attributes(adapter);
3353 if (status)
3354 return status;
3355
3356 status = be_cmd_get_acpi_wol_cap(adapter);
3357 if (status) {
3358 /* In case of a failure to get WoL capabilities,
3359 * check the exclusion list to determine WOL capability */
3360 if (!be_is_wol_excluded(adapter))
3361 adapter->wol_cap |= BE_WOL_CAP;
3362 }
3363
3364 if (be_is_wol_supported(adapter))
3365 adapter->wol = true;
3366
3367 return 0;
3368 }
3369
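/* Determine the adapter generation from the PCI device id. For Lancer
 * devices, also validate the SLI_INTF register and extract the SLI
 * family.
 */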
3370 static int be_dev_family_check(struct be_adapter *adapter)
3371 {
3372 struct pci_dev *pdev = adapter->pdev;
3373 u32 sli_intf = 0, if_type;
3374
3375 switch (pdev->device) {
3376 case BE_DEVICE_ID1:
3377 case OC_DEVICE_ID1:
3378 adapter->generation = BE_GEN2;
3379 break;
3380 case BE_DEVICE_ID2:
3381 case OC_DEVICE_ID2:
3382 case OC_DEVICE_ID5:
3383 adapter->generation = BE_GEN3;
3384 break;
3385 case OC_DEVICE_ID3:
3386 case OC_DEVICE_ID4:
3387 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3388 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3389 SLI_INTF_IF_TYPE_SHIFT;
3390
3391 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3392 if_type != 0x02) {
3393 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3394 return -EINVAL;
3395 }
3396 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3397 SLI_INTF_FAMILY_SHIFT);
3398 adapter->generation = BE_GEN3;
3399 break;
3400 default:
3401 adapter->generation = 0;
3402 }
3403 return 0;
3404 }
3405
3406 static int lancer_wait_ready(struct be_adapter *adapter)
3407 {
3408 #define SLIPORT_READY_TIMEOUT 30
3409 u32 sliport_status;
3410 int status = 0, i;
3411
3412 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3413 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3414 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3415 break;
3416
3417 msleep(1000);
3418 }
3419
3420 if (i == SLIPORT_READY_TIMEOUT)
3421 status = -1;
3422
3423 return status;
3424 }
3425
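/* If the SLI port reports an error that requires a reset, initiate a
 * port reset via the SLIPORT_CONTROL register and wait for the port to
 * become ready again.
 */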
3426 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3427 {
3428 int status;
3429 u32 sliport_status, err, reset_needed;
3430 status = lancer_wait_ready(adapter);
3431 if (!status) {
3432 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3433 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3434 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3435 if (err && reset_needed) {
3436 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3437 adapter->db + SLIPORT_CONTROL_OFFSET);
3438
3439 /* check adapter has corrected the error */
3440 status = lancer_wait_ready(adapter);
3441 sliport_status = ioread32(adapter->db +
3442 SLIPORT_STATUS_OFFSET);
3443 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3444 SLIPORT_STATUS_RN_MASK);
3445 if (status || sliport_status)
3446 status = -1;
3447 } else if (err || reset_needed) {
3448 status = -1;
3449 }
3450 }
3451 return status;
3452 }
3453
3454 static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3455 {
3456 int status;
3457 u32 sliport_status;
3458
3459 if (adapter->eeh_err || adapter->ue_detected)
3460 return;
3461
3462 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3463
3464 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3465 dev_err(&adapter->pdev->dev,
3466 "Adapter in error state."
3467 "Trying to recover.\n");
3468
3469 status = lancer_test_and_set_rdy_state(adapter);
3470 if (status)
3471 goto err;
3472
3473 netif_device_detach(adapter->netdev);
3474
3475 if (netif_running(adapter->netdev))
3476 be_close(adapter->netdev);
3477
3478 be_clear(adapter);
3479
3480 adapter->fw_timeout = false;
3481
3482 status = be_setup(adapter);
3483 if (status)
3484 goto err;
3485
3486 if (netif_running(adapter->netdev)) {
3487 status = be_open(adapter->netdev);
3488 if (status)
3489 goto err;
3490 }
3491
3492 netif_device_attach(adapter->netdev);
3493
3494 dev_err(&adapter->pdev->dev,
3495 "Adapter error recovery succeeded\n");
3496 }
3497 return;
3498 err:
3499 dev_err(&adapter->pdev->dev,
3500 "Adapter error recovery failed\n");
3501 }
3502
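/* Periodic (1 second) housekeeping: Lancer error recovery and UE
 * detection, firing the stats command, re-posting buffers on starved RX
 * rings and updating the EQ delay (AIC); when the interface is down only
 * pending MCC completions are reaped.
 */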
3503 static void be_worker(struct work_struct *work)
3504 {
3505 struct be_adapter *adapter =
3506 container_of(work, struct be_adapter, work.work);
3507 struct be_rx_obj *rxo;
3508 struct be_eq_obj *eqo;
3509 int i;
3510
3511 if (lancer_chip(adapter))
3512 lancer_test_and_recover_fn_err(adapter);
3513
3514 be_detect_dump_ue(adapter);
3515
3516 /* when interrupts are not yet enabled, just reap any pending
3517 * mcc completions */
3518 if (!netif_running(adapter->netdev)) {
3519 be_process_mcc(adapter);
3520 goto reschedule;
3521 }
3522
3523 if (!adapter->stats_cmd_sent) {
3524 if (lancer_chip(adapter))
3525 lancer_cmd_get_pport_stats(adapter,
3526 &adapter->stats_cmd);
3527 else
3528 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3529 }
3530
3531 for_all_rx_queues(adapter, rxo, i) {
3532 if (rxo->rx_post_starved) {
3533 rxo->rx_post_starved = false;
3534 be_post_rx_frags(rxo, GFP_KERNEL);
3535 }
3536 }
3537
3538 for_all_evt_queues(adapter, eqo, i)
3539 be_eqd_update(adapter, eqo);
3540
3541 reschedule:
3542 adapter->work_counter++;
3543 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3544 }
3545
3546 static int __devinit be_probe(struct pci_dev *pdev,
3547 const struct pci_device_id *pdev_id)
3548 {
3549 int status = 0;
3550 struct be_adapter *adapter;
3551 struct net_device *netdev;
3552
3553 status = pci_enable_device(pdev);
3554 if (status)
3555 goto do_none;
3556
3557 status = pci_request_regions(pdev, DRV_NAME);
3558 if (status)
3559 goto disable_dev;
3560 pci_set_master(pdev);
3561
3562 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3563 if (netdev == NULL) {
3564 status = -ENOMEM;
3565 goto rel_reg;
3566 }
3567 adapter = netdev_priv(netdev);
3568 adapter->pdev = pdev;
3569 pci_set_drvdata(pdev, adapter);
3570
3571 status = be_dev_family_check(adapter);
3572 if (status)
3573 goto free_netdev;
3574
3575 adapter->netdev = netdev;
3576 SET_NETDEV_DEV(netdev, &pdev->dev);
3577
3578 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3579 if (!status) {
3580 netdev->features |= NETIF_F_HIGHDMA;
3581 } else {
3582 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3583 if (status) {
3584 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3585 goto free_netdev;
3586 }
3587 }
3588
3589 status = be_sriov_enable(adapter);
3590 if (status)
3591 goto free_netdev;
3592
3593 status = be_ctrl_init(adapter);
3594 if (status)
3595 goto disable_sriov;
3596
3597 if (lancer_chip(adapter)) {
3598 status = lancer_wait_ready(adapter);
3599 if (!status) {
3600 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3601 adapter->db + SLIPORT_CONTROL_OFFSET);
3602 status = lancer_test_and_set_rdy_state(adapter);
3603 }
3604 if (status) {
3605 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3606 goto ctrl_clean;
3607 }
3608 }
3609
3610 /* sync up with fw's ready state */
3611 if (be_physfn(adapter)) {
3612 status = be_cmd_POST(adapter);
3613 if (status)
3614 goto ctrl_clean;
3615 }
3616
3617 /* tell fw we're ready to fire cmds */
3618 status = be_cmd_fw_init(adapter);
3619 if (status)
3620 goto ctrl_clean;
3621
3622 status = be_cmd_reset_function(adapter);
3623 if (status)
3624 goto ctrl_clean;
3625
3626 /* The INTR bit may be set in the card when probed by a kdump kernel
3627 * after a crash.
3628 */
3629 if (!lancer_chip(adapter))
3630 be_intr_set(adapter, false);
3631
3632 status = be_stats_init(adapter);
3633 if (status)
3634 goto ctrl_clean;
3635
3636 status = be_get_config(adapter);
3637 if (status)
3638 goto stats_clean;
3639
3640 INIT_DELAYED_WORK(&adapter->work, be_worker);
3641 adapter->rx_fc = adapter->tx_fc = true;
3642
3643 status = be_setup(adapter);
3644 if (status)
3645 goto msix_disable;
3646
3647 be_netdev_init(netdev);
3648 status = register_netdev(netdev);
3649 if (status != 0)
3650 goto unsetup;
3651
3652 dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
3653 adapter->port_num);
3654
3655 return 0;
3656
3657 unsetup:
3658 be_clear(adapter);
3659 msix_disable:
3660 be_msix_disable(adapter);
3661 stats_clean:
3662 be_stats_cleanup(adapter);
3663 ctrl_clean:
3664 be_ctrl_cleanup(adapter);
3665 disable_sriov:
3666 be_sriov_disable(adapter);
3667 free_netdev:
3668 free_netdev(netdev);
3669 pci_set_drvdata(pdev, NULL);
3670 rel_reg:
3671 pci_release_regions(pdev);
3672 disable_dev:
3673 pci_disable_device(pdev);
3674 do_none:
3675 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3676 return status;
3677 }
3678
3679 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3680 {
3681 struct be_adapter *adapter = pci_get_drvdata(pdev);
3682 struct net_device *netdev = adapter->netdev;
3683
3684 if (adapter->wol)
3685 be_setup_wol(adapter, true);
3686
3687 netif_device_detach(netdev);
3688 if (netif_running(netdev)) {
3689 rtnl_lock();
3690 be_close(netdev);
3691 rtnl_unlock();
3692 }
3693 be_clear(adapter);
3694
3695 pci_save_state(pdev);
3696 pci_disable_device(pdev);
3697 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3698 return 0;
3699 }
3700
3701 static int be_resume(struct pci_dev *pdev)
3702 {
3703 int status = 0;
3704 struct be_adapter *adapter = pci_get_drvdata(pdev);
3705 struct net_device *netdev = adapter->netdev;
3706
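	/* Keep the netdev detached until the device is fully re-initialized
	 * below.
	 */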
3707 netif_device_detach(netdev);
3708
3709 status = pci_enable_device(pdev);
3710 if (status)
3711 return status;
3712
3713 	pci_set_power_state(pdev, PCI_D0);
3714 pci_restore_state(pdev);
3715
3716 /* tell fw we're ready to fire cmds */
3717 status = be_cmd_fw_init(adapter);
3718 if (status)
3719 return status;
3720
3721 be_setup(adapter);
3722 if (netif_running(netdev)) {
3723 rtnl_lock();
3724 be_open(netdev);
3725 rtnl_unlock();
3726 }
3727 netif_device_attach(netdev);
3728
3729 if (adapter->wol)
3730 be_setup_wol(adapter, false);
3731
3732 return 0;
3733 }
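
/*
 * The legacy .suspend/.resume hooks above could also be expressed through
 * dev_pm_ops on kernels that support it. A minimal sketch, not part of this
 * driver (be_pm_suspend, be_pm_resume and be_pm_ops are illustrative names):
 *
 *	static int be_pm_suspend(struct device *dev)
 *	{
 *		return be_suspend(to_pci_dev(dev), PMSG_SUSPEND);
 *	}
 *
 *	static int be_pm_resume(struct device *dev)
 *	{
 *		return be_resume(to_pci_dev(dev));
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(be_pm_ops, be_pm_suspend, be_pm_resume);
 *
 * and ".driver.pm = &be_pm_ops" in struct pci_driver in place of the
 * .suspend/.resume entries.
 */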
3734
3735 /*
3736 * An FLR (issued below via be_cmd_reset_function()) stops BE from DMAing
3737 * any further data.
3738 */
3738 static void be_shutdown(struct pci_dev *pdev)
3739 {
3740 struct be_adapter *adapter = pci_get_drvdata(pdev);
3741
3742 if (!adapter)
3743 return;
3744
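	/* Stop the deferred worker before resetting the function so it cannot
	 * issue commands against a device that is going away.
	 */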
3745 cancel_delayed_work_sync(&adapter->work);
3746
3747 netif_device_detach(adapter->netdev);
3748
3749 if (adapter->wol)
3750 be_setup_wol(adapter, true);
3751
3752 be_cmd_reset_function(adapter);
3753
3754 pci_disable_device(pdev);
3755 }
3756
3757 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3758 pci_channel_state_t state)
3759 {
3760 struct be_adapter *adapter = pci_get_drvdata(pdev);
3761 struct net_device *netdev = adapter->netdev;
3762
3763 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3764
3765 adapter->eeh_err = true;
3766
3767 netif_device_detach(netdev);
3768
3769 if (netif_running(netdev)) {
3770 rtnl_lock();
3771 be_close(netdev);
3772 rtnl_unlock();
3773 }
3774 be_clear(adapter);
3775
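	/*
	 * A permanent channel failure means the slot cannot be recovered;
	 * report DISCONNECT so the PCI core gives up. Otherwise disable the
	 * device and ask for a slot reset.
	 */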
3776 if (state == pci_channel_io_perm_failure)
3777 return PCI_ERS_RESULT_DISCONNECT;
3778
3779 pci_disable_device(pdev);
3780
3781 return PCI_ERS_RESULT_NEED_RESET;
3782 }
3783
3784 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3785 {
3786 struct be_adapter *adapter = pci_get_drvdata(pdev);
3787 int status;
3788
3789 dev_info(&adapter->pdev->dev, "EEH reset\n");
3790 adapter->eeh_err = false;
3791 adapter->ue_detected = false;
3792 adapter->fw_timeout = false;
3793
3794 status = pci_enable_device(pdev);
3795 if (status)
3796 return PCI_ERS_RESULT_DISCONNECT;
3797
3798 pci_set_master(pdev);
3799 	pci_set_power_state(pdev, PCI_D0);
3800 pci_restore_state(pdev);
3801
3802 /* Check if card is ok and fw is ready */
3803 status = be_cmd_POST(adapter);
3804 if (status)
3805 return PCI_ERS_RESULT_DISCONNECT;
3806
3807 return PCI_ERS_RESULT_RECOVERED;
3808 }
3809
3810 static void be_eeh_resume(struct pci_dev *pdev)
3811 {
3812 int status = 0;
3813 struct be_adapter *adapter = pci_get_drvdata(pdev);
3814 struct net_device *netdev = adapter->netdev;
3815
3816 dev_info(&adapter->pdev->dev, "EEH resume\n");
3817
3818 pci_save_state(pdev);
3819
3820 /* tell fw we're ready to fire cmds */
3821 status = be_cmd_fw_init(adapter);
3822 if (status)
3823 goto err;
3824
3825 status = be_setup(adapter);
3826 if (status)
3827 goto err;
3828
3829 if (netif_running(netdev)) {
3830 status = be_open(netdev);
3831 if (status)
3832 goto err;
3833 }
3834 netif_device_attach(netdev);
3835 return;
3836 err:
3837 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3838 }
3839
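/*
 * EEH recovery sequence driven by the PCI core: .error_detected quiesces the
 * adapter, .slot_reset re-enables the device and checks that the firmware
 * comes back (POST), and .resume rebuilds the queues and reopens the netdev.
 */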
3840 static struct pci_error_handlers be_eeh_handlers = {
3841 .error_detected = be_eeh_err_detected,
3842 .slot_reset = be_eeh_reset,
3843 .resume = be_eeh_resume,
3844 };
3845
3846 static struct pci_driver be_driver = {
3847 .name = DRV_NAME,
3848 .id_table = be_dev_ids,
3849 .probe = be_probe,
3850 .remove = be_remove,
3851 .suspend = be_suspend,
3852 .resume = be_resume,
3853 .shutdown = be_shutdown,
3854 .err_handler = &be_eeh_handlers
3855 };
3856
3857 static int __init be_init_module(void)
3858 {
3859 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3860 rx_frag_size != 2048) {
3861 printk(KERN_WARNING DRV_NAME
3862 " : Module param rx_frag_size must be 2048/4096/8192."
3863 " Using 2048\n");
3864 rx_frag_size = 2048;
3865 }
3866
3867 return pci_register_driver(&be_driver);
3868 }
3869 module_init(be_init_module);
3870
3871 static void __exit be_exit_module(void)
3872 {
3873 pci_unregister_driver(&be_driver);
3874 }
3875 module_exit(be_exit_module);
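
/*
 * On kernels that provide the module_pci_driver() helper, the init/exit
 * boilerplate above could be collapsed to a single line. Sketch only,
 * assuming that helper is available; note the rx_frag_size sanity check in
 * be_init_module() would then have to move elsewhere:
 *
 *	module_pci_driver(be_driver);
 */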
3876