1 /*
2  * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mdio.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <linux/stringify.h>
48 #include <linux/sched.h>
49 #include <linux/slab.h>
50 #include <asm/uaccess.h>
51 
52 #include "common.h"
53 #include "cxgb3_ioctl.h"
54 #include "regs.h"
55 #include "cxgb3_offload.h"
56 #include "version.h"
57 
58 #include "cxgb3_ctl_defs.h"
59 #include "t3_cpl.h"
60 #include "firmware_exports.h"
61 
62 enum {
63 	MAX_TXQ_ENTRIES = 16384,
64 	MAX_CTRL_TXQ_ENTRIES = 1024,
65 	MAX_RSPQ_ENTRIES = 16384,
66 	MAX_RX_BUFFERS = 16384,
67 	MAX_RX_JUMBO_BUFFERS = 16384,
68 	MIN_TXQ_ENTRIES = 4,
69 	MIN_CTRL_TXQ_ENTRIES = 4,
70 	MIN_RSPQ_ENTRIES = 32,
71 	MIN_FL_ENTRIES = 32
72 };
73 
74 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
75 
76 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
77 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
78 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
79 
80 #define EEPROM_MAGIC 0x38E2F10C
81 
82 #define CH_DEVICE(devid, idx) \
83 	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
84 
85 static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
86 	CH_DEVICE(0x20, 0),	/* PE9000 */
87 	CH_DEVICE(0x21, 1),	/* T302E */
88 	CH_DEVICE(0x22, 2),	/* T310E */
89 	CH_DEVICE(0x23, 3),	/* T320X */
90 	CH_DEVICE(0x24, 1),	/* T302X */
91 	CH_DEVICE(0x25, 3),	/* T320E */
92 	CH_DEVICE(0x26, 2),	/* T310X */
93 	CH_DEVICE(0x30, 2),	/* T3B10 */
94 	CH_DEVICE(0x31, 3),	/* T3B20 */
95 	CH_DEVICE(0x32, 1),	/* T3B02 */
96 	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
97 	CH_DEVICE(0x36, 3),	/* S320E-CR */
98 	CH_DEVICE(0x37, 7),	/* N320E-G2 */
99 	{0,}
100 };
101 
102 MODULE_DESCRIPTION(DRV_DESC);
103 MODULE_AUTHOR("Chelsio Communications");
104 MODULE_LICENSE("Dual BSD/GPL");
105 MODULE_VERSION(DRV_VERSION);
106 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
107 
108 static int dflt_msg_enable = DFLT_MSG_ENABLE;
109 
110 module_param(dflt_msg_enable, int, 0644);
111 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
112 
113 /*
114  * The driver uses the best interrupt scheme available on a platform in the
115  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
116  * of these schemes the driver may consider as follows:
117  *
118  * msi = 2: choose from among all three options
119  * msi = 1: only consider MSI and pin interrupts
120  * msi = 0: force pin interrupts
121  */
122 static int msi = 2;
123 
124 module_param(msi, int, 0644);
125 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
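/*
 * Example (illustrative only): loading the module with "modprobe cxgb3 msi=1"
 * keeps the driver from trying MSI-X, while "msi=0" forces legacy pin
 * interrupts.  The default of 2 lets the driver pick the best scheme the
 * platform supports.
 */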
126 
127 /*
128  * The driver enables offload by default.
129  * To disable it, use ofld_disable = 1.
130  */
131 
132 static int ofld_disable = 0;
133 
134 module_param(ofld_disable, int, 0644);
135 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
136 
137 /*
138  * We have work elements that we need to cancel when an interface is taken
139  * down.  Normally the work elements would be executed by keventd but that
140  * can deadlock because of linkwatch.  If our close method takes the rtnl
141  * lock and linkwatch is ahead of our work elements in keventd, linkwatch
142  * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
143  * for our work to complete.  Get our own work queue to solve this.
144  */
145 struct workqueue_struct *cxgb3_wq;
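/*
 * The queue itself is allocated during module initialisation (outside this
 * excerpt) and carries the periodic adapter check task and external interrupt
 * work, so they never have to wait behind linkwatch on keventd.
 */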
146 
147 /**
148  *	link_report - show link status and link speed/duplex
149  *	@dev: the net device for the port whose link is to be reported
150  *
151  *	Shows the link status, speed, and duplex of a port.
152  */
153 static void link_report(struct net_device *dev)
154 {
155 	if (!netif_carrier_ok(dev))
156 		printk(KERN_INFO "%s: link down\n", dev->name);
157 	else {
158 		const char *s = "10Mbps";
159 		const struct port_info *p = netdev_priv(dev);
160 
161 		switch (p->link_config.speed) {
162 		case SPEED_10000:
163 			s = "10Gbps";
164 			break;
165 		case SPEED_1000:
166 			s = "1000Mbps";
167 			break;
168 		case SPEED_100:
169 			s = "100Mbps";
170 			break;
171 		}
172 
173 		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
174 		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
175 	}
176 }
177 
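/*
 * Drain the MAC TX FIFO while the link is down: setting F_ENDROPPKT makes the
 * MAC discard transmitted frames, and toggling the RX/TX control registers
 * restarts the datapath so queued frames are flushed.  disable_tx_fifo_drain()
 * undoes this once the link (or link fault) recovers.
 */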
178 static void enable_tx_fifo_drain(struct adapter *adapter,
179 				 struct port_info *pi)
180 {
181 	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
182 			 F_ENDROPPKT);
183 	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
184 	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
185 	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
186 }
187 
188 static void disable_tx_fifo_drain(struct adapter *adapter,
189 				  struct port_info *pi)
190 {
191 	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
192 			 F_ENDROPPKT, 0);
193 }
194 
195 void t3_os_link_fault(struct adapter *adap, int port_id, int state)
196 {
197 	struct net_device *dev = adap->port[port_id];
198 	struct port_info *pi = netdev_priv(dev);
199 
200 	if (state == netif_carrier_ok(dev))
201 		return;
202 
203 	if (state) {
204 		struct cmac *mac = &pi->mac;
205 
206 		netif_carrier_on(dev);
207 
208 		disable_tx_fifo_drain(adap, pi);
209 
210 		/* Clear local faults */
211 		t3_xgm_intr_disable(adap, pi->port_id);
212 		t3_read_reg(adap, A_XGM_INT_STATUS +
213 				    pi->mac.offset);
214 		t3_write_reg(adap,
215 			     A_XGM_INT_CAUSE + pi->mac.offset,
216 			     F_XGM_INT);
217 
218 		t3_set_reg_field(adap,
219 				 A_XGM_INT_ENABLE +
220 				 pi->mac.offset,
221 				 F_XGM_INT, F_XGM_INT);
222 		t3_xgm_intr_enable(adap, pi->port_id);
223 
224 		t3_mac_enable(mac, MAC_DIRECTION_TX);
225 	} else {
226 		netif_carrier_off(dev);
227 
228 		/* Flush TX FIFO */
229 		enable_tx_fifo_drain(adap, pi);
230 	}
231 	link_report(dev);
232 }
233 
234 /**
235  *	t3_os_link_changed - handle link status changes
236  *	@adapter: the adapter associated with the link change
237  *	@port_id: the port index whose link status has changed
238  *	@link_stat: the new status of the link
239  *	@speed: the new speed setting
240  *	@duplex: the new duplex setting
241  *	@pause: the new flow-control setting
242  *
243  *	This is the OS-dependent handler for link status changes.  The OS
244  *	neutral handler takes care of most of the processing for these events,
245  *	then calls this handler for any OS-specific processing.
246  */
247 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
248 			int speed, int duplex, int pause)
249 {
250 	struct net_device *dev = adapter->port[port_id];
251 	struct port_info *pi = netdev_priv(dev);
252 	struct cmac *mac = &pi->mac;
253 
254 	/* Skip changes from disabled ports. */
255 	if (!netif_running(dev))
256 		return;
257 
258 	if (link_stat != netif_carrier_ok(dev)) {
259 		if (link_stat) {
260 			disable_tx_fifo_drain(adapter, pi);
261 
262 			t3_mac_enable(mac, MAC_DIRECTION_RX);
263 
264 			/* Clear local faults */
265 			t3_xgm_intr_disable(adapter, pi->port_id);
266 			t3_read_reg(adapter, A_XGM_INT_STATUS +
267 				    pi->mac.offset);
268 			t3_write_reg(adapter,
269 				     A_XGM_INT_CAUSE + pi->mac.offset,
270 				     F_XGM_INT);
271 
272 			t3_set_reg_field(adapter,
273 					 A_XGM_INT_ENABLE + pi->mac.offset,
274 					 F_XGM_INT, F_XGM_INT);
275 			t3_xgm_intr_enable(adapter, pi->port_id);
276 
277 			netif_carrier_on(dev);
278 		} else {
279 			netif_carrier_off(dev);
280 
281 			t3_xgm_intr_disable(adapter, pi->port_id);
282 			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
283 			t3_set_reg_field(adapter,
284 					 A_XGM_INT_ENABLE + pi->mac.offset,
285 					 F_XGM_INT, 0);
286 
287 			if (is_10G(adapter))
288 				pi->phy.ops->power_down(&pi->phy, 1);
289 
290 			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
291 			t3_mac_disable(mac, MAC_DIRECTION_RX);
292 			t3_link_start(&pi->phy, mac, &pi->link_config);
293 
294 			/* Flush TX FIFO */
295 			enable_tx_fifo_drain(adapter, pi);
296 		}
297 
298 		link_report(dev);
299 	}
300 }
301 
302 /**
303  *	t3_os_phymod_changed - handle PHY module changes
304  *	@adap: the adapter associated with the PHY module change
305  *	@port_id: the port index whose PHY module changed
306  *
307  *	This is the OS-dependent handler for PHY module changes.  It is
308  *	invoked when a PHY module is removed or inserted for any OS-specific
309  *	processing.
310  */
311 void t3_os_phymod_changed(struct adapter *adap, int port_id)
312 {
313 	static const char *mod_str[] = {
314 		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
315 	};
316 
317 	const struct net_device *dev = adap->port[port_id];
318 	const struct port_info *pi = netdev_priv(dev);
319 
320 	if (pi->phy.modtype == phy_modtype_none)
321 		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
322 	else
323 		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
324 		       mod_str[pi->phy.modtype]);
325 }
326 
327 static void cxgb_set_rxmode(struct net_device *dev)
328 {
329 	struct port_info *pi = netdev_priv(dev);
330 
331 	t3_mac_set_rx_mode(&pi->mac, dev);
332 }
333 
334 /**
335  *	link_start - enable a port
336  *	@dev: the device to enable
337  *
338  *	Performs the MAC and PHY actions needed to enable a port.
339  */
340 static void link_start(struct net_device *dev)
341 {
342 	struct port_info *pi = netdev_priv(dev);
343 	struct cmac *mac = &pi->mac;
344 
345 	t3_mac_reset(mac);
346 	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
347 	t3_mac_set_mtu(mac, dev->mtu);
348 	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
349 	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
350 	t3_mac_set_rx_mode(mac, dev);
351 	t3_link_start(&pi->phy, mac, &pi->link_config);
352 	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
353 }
354 
355 static inline void cxgb_disable_msi(struct adapter *adapter)
356 {
357 	if (adapter->flags & USING_MSIX) {
358 		pci_disable_msix(adapter->pdev);
359 		adapter->flags &= ~USING_MSIX;
360 	} else if (adapter->flags & USING_MSI) {
361 		pci_disable_msi(adapter->pdev);
362 		adapter->flags &= ~USING_MSI;
363 	}
364 }
365 
366 /*
367  * Interrupt handler for asynchronous events used with MSI-X.
368  */
369 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
370 {
371 	t3_slow_intr_handler(cookie);
372 	return IRQ_HANDLED;
373 }
374 
375 /*
376  * Name the MSI-X interrupts.
377  */
378 static void name_msix_vecs(struct adapter *adap)
379 {
380 	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
381 
382 	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
383 	adap->msix_info[0].desc[n] = 0;
384 
385 	for_each_port(adap, j) {
386 		struct net_device *d = adap->port[j];
387 		const struct port_info *pi = netdev_priv(d);
388 
389 		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
390 			snprintf(adap->msix_info[msi_idx].desc, n,
391 				 "%s-%d", d->name, pi->first_qset + i);
392 			adap->msix_info[msi_idx].desc[n] = 0;
393 		}
394 	}
395 }
396 
397 static int request_msix_data_irqs(struct adapter *adap)
398 {
399 	int i, j, err, qidx = 0;
400 
401 	for_each_port(adap, i) {
402 		int nqsets = adap2pinfo(adap, i)->nqsets;
403 
404 		for (j = 0; j < nqsets; ++j) {
405 			err = request_irq(adap->msix_info[qidx + 1].vec,
406 					  t3_intr_handler(adap,
407 							  adap->sge.qs[qidx].
408 							  rspq.polling), 0,
409 					  adap->msix_info[qidx + 1].desc,
410 					  &adap->sge.qs[qidx]);
411 			if (err) {
412 				while (--qidx >= 0)
413 					free_irq(adap->msix_info[qidx + 1].vec,
414 						 &adap->sge.qs[qidx]);
415 				return err;
416 			}
417 			qidx++;
418 		}
419 	}
420 	return 0;
421 }
422 
423 static void free_irq_resources(struct adapter *adapter)
424 {
425 	if (adapter->flags & USING_MSIX) {
426 		int i, n = 0;
427 
428 		free_irq(adapter->msix_info[0].vec, adapter);
429 		for_each_port(adapter, i)
430 			n += adap2pinfo(adapter, i)->nqsets;
431 
432 		for (i = 0; i < n; ++i)
433 			free_irq(adapter->msix_info[i + 1].vec,
434 				 &adapter->sge.qs[i]);
435 	} else
436 		free_irq(adapter->pdev->irq, adapter);
437 }
438 
439 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
440 			      unsigned long n)
441 {
442 	int attempts = 10;
443 
444 	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
445 		if (!--attempts)
446 			return -ETIMEDOUT;
447 		msleep(10);
448 	}
449 	return 0;
450 }
451 
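/*
 * Prime the TP memories to establish good parity: write every SMT, L2T and
 * routing-table entry (plus one TCB field) via management CPLs and wait for
 * the replies before returning.  Invoked from cxgb_up() on T3C-revision
 * adapters.
 */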
452 static int init_tp_parity(struct adapter *adap)
453 {
454 	int i;
455 	struct sk_buff *skb;
456 	struct cpl_set_tcb_field *greq;
457 	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
458 
459 	t3_tp_set_offload_mode(adap, 1);
460 
461 	for (i = 0; i < 16; i++) {
462 		struct cpl_smt_write_req *req;
463 
464 		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
465 		if (!skb)
466 			skb = adap->nofail_skb;
467 		if (!skb)
468 			goto alloc_skb_fail;
469 
470 		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
471 		memset(req, 0, sizeof(*req));
472 		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
473 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
474 		req->mtu_idx = NMTUS - 1;
475 		req->iff = i;
476 		t3_mgmt_tx(adap, skb);
477 		if (skb == adap->nofail_skb) {
478 			await_mgmt_replies(adap, cnt, i + 1);
479 			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
480 			if (!adap->nofail_skb)
481 				goto alloc_skb_fail;
482 		}
483 	}
484 
485 	for (i = 0; i < 2048; i++) {
486 		struct cpl_l2t_write_req *req;
487 
488 		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
489 		if (!skb)
490 			skb = adap->nofail_skb;
491 		if (!skb)
492 			goto alloc_skb_fail;
493 
494 		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
495 		memset(req, 0, sizeof(*req));
496 		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
497 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
498 		req->params = htonl(V_L2T_W_IDX(i));
499 		t3_mgmt_tx(adap, skb);
500 		if (skb == adap->nofail_skb) {
501 			await_mgmt_replies(adap, cnt, 16 + i + 1);
502 			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
503 			if (!adap->nofail_skb)
504 				goto alloc_skb_fail;
505 		}
506 	}
507 
508 	for (i = 0; i < 2048; i++) {
509 		struct cpl_rte_write_req *req;
510 
511 		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
512 		if (!skb)
513 			skb = adap->nofail_skb;
514 		if (!skb)
515 			goto alloc_skb_fail;
516 
517 		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
518 		memset(req, 0, sizeof(*req));
519 		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
520 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
521 		req->l2t_idx = htonl(V_L2T_W_IDX(i));
522 		t3_mgmt_tx(adap, skb);
523 		if (skb == adap->nofail_skb) {
524 			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
525 			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
526 			if (!adap->nofail_skb)
527 				goto alloc_skb_fail;
528 		}
529 	}
530 
531 	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
532 	if (!skb)
533 		skb = adap->nofail_skb;
534 	if (!skb)
535 		goto alloc_skb_fail;
536 
537 	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
538 	memset(greq, 0, sizeof(*greq));
539 	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
540 	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
541 	greq->mask = cpu_to_be64(1);
542 	t3_mgmt_tx(adap, skb);
543 
544 	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
545 	if (skb == adap->nofail_skb) {
546 		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
547 		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
548 	}
549 
550 	t3_tp_set_offload_mode(adap, 0);
551 	return i;
552 
553 alloc_skb_fail:
554 	t3_tp_set_offload_mode(adap, 0);
555 	return -ENOMEM;
556 }
557 
558 /**
559  *	setup_rss - configure RSS
560  *	@adap: the adapter
561  *
562  *	Sets up RSS to distribute packets to multiple receive queues.  We
563  *	configure the RSS CPU lookup table to distribute to the number of HW
564  *	receive queues, and the response queue lookup table to narrow that
565  *	down to the response queues actually configured for each port.
566  *	We always configure the RSS mapping for two ports since the mapping
567  *	table has plenty of entries.
568  */
569 static void setup_rss(struct adapter *adap)
570 {
571 	int i;
572 	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
573 	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
574 	u8 cpus[SGE_QSETS + 1];
575 	u16 rspq_map[RSS_TABLE_SIZE];
576 
577 	for (i = 0; i < SGE_QSETS; ++i)
578 		cpus[i] = i;
579 	cpus[SGE_QSETS] = 0xff;	/* terminator */
580 
581 	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
582 		rspq_map[i] = i % nq0;
583 		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
584 	}
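	/*
	 * For example (hypothetical queue counts), with nq0 = 4 and nq1 = 2 the
	 * first half of rspq_map cycles through 0,1,2,3 and the second half
	 * through 4,5, so each port's traffic lands only on its own qsets.
	 */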
585 
586 	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
587 		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
588 		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
589 }
590 
591 static void ring_dbs(struct adapter *adap)
592 {
593 	int i, j;
594 
595 	for (i = 0; i < SGE_QSETS; i++) {
596 		struct sge_qset *qs = &adap->sge.qs[i];
597 
598 		if (qs->adap)
599 			for (j = 0; j < SGE_TXQ_PER_SET; j++)
600 				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
601 	}
602 }
603 
604 static void init_napi(struct adapter *adap)
605 {
606 	int i;
607 
608 	for (i = 0; i < SGE_QSETS; i++) {
609 		struct sge_qset *qs = &adap->sge.qs[i];
610 
611 		if (qs->adap)
612 			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
613 				       64);
614 	}
615 
616 	/*
617 	 * netif_napi_add() can be called only once per napi_struct because it
618 	 * adds each new napi_struct to a list.  Record that it has been done
619 	 * (NAPI_INIT) so it is not called a second time, e.g., during EEH recovery.
620 	 */
621 	adap->flags |= NAPI_INIT;
622 }
623 
624 /*
625  * Wait until all NAPI handlers are descheduled.  This includes the handlers of
626  * both netdevices representing interfaces and the dummy ones for the extra
627  * queues.
628  */
629 static void quiesce_rx(struct adapter *adap)
630 {
631 	int i;
632 
633 	for (i = 0; i < SGE_QSETS; i++)
634 		if (adap->sge.qs[i].adap)
635 			napi_disable(&adap->sge.qs[i].napi);
636 }
637 
638 static void enable_all_napi(struct adapter *adap)
639 {
640 	int i;
641 	for (i = 0; i < SGE_QSETS; i++)
642 		if (adap->sge.qs[i].adap)
643 			napi_enable(&adap->sge.qs[i].napi);
644 }
645 
646 /**
647  *	set_qset_lro - Turn a queue set's LRO capability on and off
648  *	@dev: the device the qset is attached to
649  *	@qset_idx: the queue set index
650  *	@val: the LRO switch
651  *
652  *	Sets LRO on or off for a particular queue set.
653  *	The device's features flag is updated to reflect the LRO
654  *	capability when all queues belonging to the device are
655  *	in the same state.
656  */
657 static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
658 {
659 	struct port_info *pi = netdev_priv(dev);
660 	struct adapter *adapter = pi->adapter;
661 
662 	adapter->params.sge.qset[qset_idx].lro = !!val;
663 	adapter->sge.qs[qset_idx].lro_enabled = !!val;
664 }
665 
666 /**
667  *	setup_sge_qsets - configure SGE Tx/Rx/response queues
668  *	@adap: the adapter
669  *
670  *	Determines how many sets of SGE queues to use and initializes them.
671  *	We support multiple queue sets per port if we have MSI-X, otherwise
672  *	just one queue set per port.
673  */
674 static int setup_sge_qsets(struct adapter *adap)
675 {
676 	int i, j, err, irq_idx = 0, qset_idx = 0;
677 	unsigned int ntxq = SGE_TXQ_PER_SET;
678 
679 	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
680 		irq_idx = -1;
681 
682 	for_each_port(adap, i) {
683 		struct net_device *dev = adap->port[i];
684 		struct port_info *pi = netdev_priv(dev);
685 
686 		pi->qs = &adap->sge.qs[pi->first_qset];
687 		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
688 			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
689 			err = t3_sge_alloc_qset(adap, qset_idx, 1,
690 				(adap->flags & USING_MSIX) ? qset_idx + 1 :
691 							     irq_idx,
692 				&adap->params.sge.qset[qset_idx], ntxq, dev,
693 				netdev_get_tx_queue(dev, j));
694 			if (err) {
695 				t3_free_sge_resources(adap);
696 				return err;
697 			}
698 		}
699 	}
700 
701 	return 0;
702 }
703 
704 static ssize_t attr_show(struct device *d, char *buf,
705 			 ssize_t(*format) (struct net_device *, char *))
706 {
707 	ssize_t len;
708 
709 	/* Synchronize with ioctls that may shut down the device */
710 	rtnl_lock();
711 	len = (*format) (to_net_dev(d), buf);
712 	rtnl_unlock();
713 	return len;
714 }
715 
716 static ssize_t attr_store(struct device *d,
717 			  const char *buf, size_t len,
718 			  ssize_t(*set) (struct net_device *, unsigned int),
719 			  unsigned int min_val, unsigned int max_val)
720 {
721 	char *endp;
722 	ssize_t ret;
723 	unsigned int val;
724 
725 	if (!capable(CAP_NET_ADMIN))
726 		return -EPERM;
727 
728 	val = simple_strtoul(buf, &endp, 0);
729 	if (endp == buf || val < min_val || val > max_val)
730 		return -EINVAL;
731 
732 	rtnl_lock();
733 	ret = (*set) (to_net_dev(d), val);
734 	if (!ret)
735 		ret = len;
736 	rtnl_unlock();
737 	return ret;
738 }
739 
740 #define CXGB3_SHOW(name, val_expr) \
741 static ssize_t format_##name(struct net_device *dev, char *buf) \
742 { \
743 	struct port_info *pi = netdev_priv(dev); \
744 	struct adapter *adap = pi->adapter; \
745 	return sprintf(buf, "%u\n", val_expr); \
746 } \
747 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
748 			   char *buf) \
749 { \
750 	return attr_show(d, buf, format_##name); \
751 }
752 
753 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
754 {
755 	struct port_info *pi = netdev_priv(dev);
756 	struct adapter *adap = pi->adapter;
757 	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
758 
759 	if (adap->flags & FULL_INIT_DONE)
760 		return -EBUSY;
761 	if (val && adap->params.rev == 0)
762 		return -EINVAL;
763 	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
764 	    min_tids)
765 		return -EINVAL;
766 	adap->params.mc5.nfilters = val;
767 	return 0;
768 }
769 
770 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
771 			      const char *buf, size_t len)
772 {
773 	return attr_store(d, buf, len, set_nfilters, 0, ~0);
774 }
775 
776 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
777 {
778 	struct port_info *pi = netdev_priv(dev);
779 	struct adapter *adap = pi->adapter;
780 
781 	if (adap->flags & FULL_INIT_DONE)
782 		return -EBUSY;
783 	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
784 	    MC5_MIN_TIDS)
785 		return -EINVAL;
786 	adap->params.mc5.nservers = val;
787 	return 0;
788 }
789 
790 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
791 			      const char *buf, size_t len)
792 {
793 	return attr_store(d, buf, len, set_nservers, 0, ~0);
794 }
795 
796 #define CXGB3_ATTR_R(name, val_expr) \
797 CXGB3_SHOW(name, val_expr) \
798 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
799 
800 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
801 CXGB3_SHOW(name, val_expr) \
802 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
803 
804 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
805 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
806 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
807 
808 static struct attribute *cxgb3_attrs[] = {
809 	&dev_attr_cam_size.attr,
810 	&dev_attr_nfilters.attr,
811 	&dev_attr_nservers.attr,
812 	NULL
813 };
814 
815 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
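/*
 * These attributes are registered against each port's net_device elsewhere in
 * the driver, so (for example) "cat /sys/class/net/<iface>/cam_size" or
 * "echo 8192 > /sys/class/net/<iface>/nfilters" (as root, before the adapter
 * is fully initialised) exercise the show/store handlers above.  Paths and
 * values here are illustrative.
 */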
816 
817 static ssize_t tm_attr_show(struct device *d,
818 			    char *buf, int sched)
819 {
820 	struct port_info *pi = netdev_priv(to_net_dev(d));
821 	struct adapter *adap = pi->adapter;
822 	unsigned int v, addr, bpt, cpt;
823 	ssize_t len;
824 
825 	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
826 	rtnl_lock();
827 	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
828 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
829 	if (sched & 1)
830 		v >>= 16;
831 	bpt = (v >> 8) & 0xff;
832 	cpt = v & 0xff;
833 	if (!cpt)
834 		len = sprintf(buf, "disabled\n");
835 	else {
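		/*
		 * Assuming vpd.cclk is in kHz: v becomes the scheduler tick
		 * rate in Hz, v * bpt the rate in bytes/sec, and dividing by
		 * 125 converts bytes/sec to kilobits/sec.
		 */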
836 		v = (adap->params.vpd.cclk * 1000) / cpt;
837 		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
838 	}
839 	rtnl_unlock();
840 	return len;
841 }
842 
843 static ssize_t tm_attr_store(struct device *d,
844 			     const char *buf, size_t len, int sched)
845 {
846 	struct port_info *pi = netdev_priv(to_net_dev(d));
847 	struct adapter *adap = pi->adapter;
848 	unsigned int val;
849 	char *endp;
850 	ssize_t ret;
851 
852 	if (!capable(CAP_NET_ADMIN))
853 		return -EPERM;
854 
855 	val = simple_strtoul(buf, &endp, 0);
856 	if (endp == buf || val > 10000000)
857 		return -EINVAL;
858 
859 	rtnl_lock();
860 	ret = t3_config_sched(adap, val, sched);
861 	if (!ret)
862 		ret = len;
863 	rtnl_unlock();
864 	return ret;
865 }
866 
867 #define TM_ATTR(name, sched) \
868 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
869 			   char *buf) \
870 { \
871 	return tm_attr_show(d, buf, sched); \
872 } \
873 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
874 			    const char *buf, size_t len) \
875 { \
876 	return tm_attr_store(d, buf, len, sched); \
877 } \
878 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
879 
880 TM_ATTR(sched0, 0);
881 TM_ATTR(sched1, 1);
882 TM_ATTR(sched2, 2);
883 TM_ATTR(sched3, 3);
884 TM_ATTR(sched4, 4);
885 TM_ATTR(sched5, 5);
886 TM_ATTR(sched6, 6);
887 TM_ATTR(sched7, 7);
888 
889 static struct attribute *offload_attrs[] = {
890 	&dev_attr_sched0.attr,
891 	&dev_attr_sched1.attr,
892 	&dev_attr_sched2.attr,
893 	&dev_attr_sched3.attr,
894 	&dev_attr_sched4.attr,
895 	&dev_attr_sched5.attr,
896 	&dev_attr_sched6.attr,
897 	&dev_attr_sched7.attr,
898 	NULL
899 };
900 
901 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
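/*
 * Once offload_open() adds this group, the eight TX schedulers can be tuned
 * from user space, e.g. "echo 100000 > /sys/class/net/<iface>/sched0" for a
 * roughly 100 Mbps limit (the store path accepts values in Kbps, up to
 * 10000000).  Example path and value are illustrative.
 */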
902 
903 /*
904  * Sends an sk_buff to an offload queue driver
905  * after dealing with any active network taps.
906  */
907 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
908 {
909 	int ret;
910 
911 	local_bh_disable();
912 	ret = t3_offload_tx(tdev, skb);
913 	local_bh_enable();
914 	return ret;
915 }
916 
917 static int write_smt_entry(struct adapter *adapter, int idx)
918 {
919 	struct cpl_smt_write_req *req;
920 	struct port_info *pi = netdev_priv(adapter->port[idx]);
921 	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
922 
923 	if (!skb)
924 		return -ENOMEM;
925 
926 	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
927 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
928 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
929 	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
930 	req->iff = idx;
931 	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
932 	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
933 	skb->priority = 1;
934 	offload_tx(&adapter->tdev, skb);
935 	return 0;
936 }
937 
938 static int init_smt(struct adapter *adapter)
939 {
940 	int i;
941 
942 	for_each_port(adapter, i)
943 	    write_smt_entry(adapter, i);
944 	return 0;
945 }
946 
947 static void init_port_mtus(struct adapter *adapter)
948 {
949 	unsigned int mtus = adapter->port[0]->mtu;
950 
951 	if (adapter->port[1])
952 		mtus |= adapter->port[1]->mtu << 16;
953 	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
954 }
955 
956 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
957 			      int hi, int port)
958 {
959 	struct sk_buff *skb;
960 	struct mngt_pktsched_wr *req;
961 	int ret;
962 
963 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
964 	if (!skb)
965 		skb = adap->nofail_skb;
966 	if (!skb)
967 		return -ENOMEM;
968 
969 	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
970 	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
971 	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
972 	req->sched = sched;
973 	req->idx = qidx;
974 	req->min = lo;
975 	req->max = hi;
976 	req->binding = port;
977 	ret = t3_mgmt_tx(adap, skb);
978 	if (skb == adap->nofail_skb) {
979 		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
980 					     GFP_KERNEL);
981 		if (!adap->nofail_skb)
982 			ret = -ENOMEM;
983 	}
984 
985 	return ret;
986 }
987 
988 static int bind_qsets(struct adapter *adap)
989 {
990 	int i, j, err = 0;
991 
992 	for_each_port(adap, i) {
993 		const struct port_info *pi = adap2pinfo(adap, i);
994 
995 		for (j = 0; j < pi->nqsets; ++j) {
996 			int ret = send_pktsched_cmd(adap, 1,
997 						    pi->first_qset + j, -1,
998 						    -1, i);
999 			if (ret)
1000 				err = ret;
1001 		}
1002 	}
1003 
1004 	return err;
1005 }
1006 
1007 #define FW_VERSION __stringify(FW_VERSION_MAJOR) "."			\
1008 	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
1009 #define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
1010 #define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."		\
1011 	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
1012 #define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
1013 #define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
1014 #define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
1015 #define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
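/*
 * With the version macros expanded these become names such as
 * "cxgb3/t3fw-<maj>.<min>.<micro>.bin" and "cxgb3/t3b_psram-<ver>.bin"; the
 * '%c' in TPSRAM_NAME is filled in with the chip revision from t3rev2char().
 */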
1016 MODULE_FIRMWARE(FW_FNAME);
1017 MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
1018 MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
1019 MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
1020 MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
1021 MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
1022 
1023 static inline const char *get_edc_fw_name(int edc_idx)
1024 {
1025 	const char *fw_name = NULL;
1026 
1027 	switch (edc_idx) {
1028 	case EDC_OPT_AEL2005:
1029 		fw_name = AEL2005_OPT_EDC_NAME;
1030 		break;
1031 	case EDC_TWX_AEL2005:
1032 		fw_name = AEL2005_TWX_EDC_NAME;
1033 		break;
1034 	case EDC_TWX_AEL2020:
1035 		fw_name = AEL2020_TWX_EDC_NAME;
1036 		break;
1037 	}
1038 	return fw_name;
1039 }
1040 
1041 int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
1042 {
1043 	struct adapter *adapter = phy->adapter;
1044 	const struct firmware *fw;
1045 	char buf[64];
1046 	u32 csum;
1047 	const __be32 *p;
1048 	u16 *cache = phy->phy_cache;
1049 	int i, ret;
1050 
1051 	snprintf(buf, sizeof(buf), "%s", get_edc_fw_name(edc_idx));
1052 
1053 	ret = request_firmware(&fw, buf, &adapter->pdev->dev);
1054 	if (ret < 0) {
1055 		dev_err(&adapter->pdev->dev,
1056 			"could not upgrade firmware: unable to load %s\n",
1057 			buf);
1058 		return ret;
1059 	}
1060 
1061 	/* check the size, taking the checksum into account */
1062 	if (fw->size > size + 4) {
1063 		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
1064 		       (unsigned int)fw->size, size + 4);
1065 		ret = -EINVAL;
1066 	}
1067 
1068 	/* compute checksum */
1069 	p = (const __be32 *)fw->data;
1070 	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
1071 		csum += ntohl(p[i]);
1072 
1073 	if (csum != 0xffffffff) {
1074 		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1075 		       csum);
1076 		ret = -EINVAL;
1077 	}
1078 
1079 	for (i = 0; i < size / 4 ; i++) {
1080 		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
1081 		*cache++ = be32_to_cpu(p[i]) & 0xffff;
1082 	}
1083 
1084 	release_firmware(fw);
1085 
1086 	return ret;
1087 }
1088 
1089 static int upgrade_fw(struct adapter *adap)
1090 {
1091 	int ret;
1092 	const struct firmware *fw;
1093 	struct device *dev = &adap->pdev->dev;
1094 
1095 	ret = request_firmware(&fw, FW_FNAME, dev);
1096 	if (ret < 0) {
1097 		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
1098 			FW_FNAME);
1099 		return ret;
1100 	}
1101 	ret = t3_load_fw(adap, fw->data, fw->size);
1102 	release_firmware(fw);
1103 
1104 	if (ret == 0)
1105 		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
1106 			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1107 	else
1108 		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
1109 			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1110 
1111 	return ret;
1112 }
1113 
1114 static inline char t3rev2char(struct adapter *adapter)
1115 {
1116 	char rev = 0;
1117 
1118 	switch(adapter->params.rev) {
1119 	case T3_REV_B:
1120 	case T3_REV_B2:
1121 		rev = 'b';
1122 		break;
1123 	case T3_REV_C:
1124 		rev = 'c';
1125 		break;
1126 	}
1127 	return rev;
1128 }
1129 
1130 static int update_tpsram(struct adapter *adap)
1131 {
1132 	const struct firmware *tpsram;
1133 	char buf[64];
1134 	struct device *dev = &adap->pdev->dev;
1135 	int ret;
1136 	char rev;
1137 
1138 	rev = t3rev2char(adap);
1139 	if (!rev)
1140 		return 0;
1141 
1142 	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
1143 
1144 	ret = request_firmware(&tpsram, buf, dev);
1145 	if (ret < 0) {
1146 		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
1147 			buf);
1148 		return ret;
1149 	}
1150 
1151 	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
1152 	if (ret)
1153 		goto release_tpsram;
1154 
1155 	ret = t3_set_proto_sram(adap, tpsram->data);
1156 	if (ret == 0)
1157 		dev_info(dev,
1158 			 "successful update of protocol engine "
1159 			 "to %d.%d.%d\n",
1160 			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1161 	else
1162 		dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
1163 			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1164 	if (ret)
1165 		dev_err(dev, "loading protocol SRAM failed\n");
1166 
1167 release_tpsram:
1168 	release_firmware(tpsram);
1169 
1170 	return ret;
1171 }
1172 
1173 /**
1174  *	cxgb_up - enable the adapter
1175  *	@adapter: adapter being enabled
1176  *
1177  *	Called when the first port is enabled, this function performs the
1178  *	actions necessary to make an adapter operational, such as completing
1179  *	the initialization of HW modules, and enabling interrupts.
1180  *
1181  *	Must be called with the rtnl lock held.
1182  */
1183 static int cxgb_up(struct adapter *adap)
1184 {
1185 	int err;
1186 
1187 	if (!(adap->flags & FULL_INIT_DONE)) {
1188 		err = t3_check_fw_version(adap);
1189 		if (err == -EINVAL) {
1190 			err = upgrade_fw(adap);
1191 			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1192 				FW_VERSION_MAJOR, FW_VERSION_MINOR,
1193 				FW_VERSION_MICRO, err ? "failed" : "succeeded");
1194 		}
1195 
1196 		err = t3_check_tpsram_version(adap);
1197 		if (err == -EINVAL) {
1198 			err = update_tpsram(adap);
1199 			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1200 				TP_VERSION_MAJOR, TP_VERSION_MINOR,
1201 				TP_VERSION_MICRO, err ? "failed" : "succeeded");
1202 		}
1203 
1204 		/*
1205 		 * Clear interrupts now to catch errors if t3_init_hw fails.
1206 		 * We clear them again later as initialization may trigger
1207 		 * conditions that can interrupt.
1208 		 */
1209 		t3_intr_clear(adap);
1210 
1211 		err = t3_init_hw(adap, 0);
1212 		if (err)
1213 			goto out;
1214 
1215 		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1216 		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1217 
1218 		err = setup_sge_qsets(adap);
1219 		if (err)
1220 			goto out;
1221 
1222 		setup_rss(adap);
1223 		if (!(adap->flags & NAPI_INIT))
1224 			init_napi(adap);
1225 
1226 		t3_start_sge_timers(adap);
1227 		adap->flags |= FULL_INIT_DONE;
1228 	}
1229 
1230 	t3_intr_clear(adap);
1231 
1232 	if (adap->flags & USING_MSIX) {
1233 		name_msix_vecs(adap);
1234 		err = request_irq(adap->msix_info[0].vec,
1235 				  t3_async_intr_handler, 0,
1236 				  adap->msix_info[0].desc, adap);
1237 		if (err)
1238 			goto irq_err;
1239 
1240 		err = request_msix_data_irqs(adap);
1241 		if (err) {
1242 			free_irq(adap->msix_info[0].vec, adap);
1243 			goto irq_err;
1244 		}
1245 	} else if ((err = request_irq(adap->pdev->irq,
1246 				      t3_intr_handler(adap,
1247 						      adap->sge.qs[0].rspq.
1248 						      polling),
1249 				      (adap->flags & USING_MSI) ?
1250 				       0 : IRQF_SHARED,
1251 				      adap->name, adap)))
1252 		goto irq_err;
1253 
1254 	enable_all_napi(adap);
1255 	t3_sge_start(adap);
1256 	t3_intr_enable(adap);
1257 
1258 	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1259 	    is_offload(adap) && init_tp_parity(adap) == 0)
1260 		adap->flags |= TP_PARITY_INIT;
1261 
1262 	if (adap->flags & TP_PARITY_INIT) {
1263 		t3_write_reg(adap, A_TP_INT_CAUSE,
1264 			     F_CMCACHEPERR | F_ARPLUTPERR);
1265 		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1266 	}
1267 
1268 	if (!(adap->flags & QUEUES_BOUND)) {
1269 		int ret = bind_qsets(adap);
1270 
1271 		if (ret < 0) {
1272 			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
1273 			t3_intr_disable(adap);
1274 			free_irq_resources(adap);
1275 			err = ret;
1276 			goto out;
1277 		}
1278 		adap->flags |= QUEUES_BOUND;
1279 	}
1280 
1281 out:
1282 	return err;
1283 irq_err:
1284 	CH_ERR(adap, "request_irq failed, err %d\n", err);
1285 	goto out;
1286 }
1287 
1288 /*
1289  * Release resources when all the ports and offloading have been stopped.
1290  */
1291 static void cxgb_down(struct adapter *adapter, int on_wq)
1292 {
1293 	t3_sge_stop(adapter);
1294 	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
1295 	t3_intr_disable(adapter);
1296 	spin_unlock_irq(&adapter->work_lock);
1297 
1298 	free_irq_resources(adapter);
1299 	quiesce_rx(adapter);
1300 	t3_sge_stop(adapter);
1301 	if (!on_wq)
1302 		flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
1303 }
1304 
1305 static void schedule_chk_task(struct adapter *adap)
1306 {
1307 	unsigned int timeo;
1308 
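	/*
	 * linkpoll_period appears to be expressed in tenths of a second (hence
	 * HZ * period / 10 jiffies), while stats_update_period is in seconds.
	 * A timeout of zero leaves the check task unscheduled.
	 */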
1309 	timeo = adap->params.linkpoll_period ?
1310 	    (HZ * adap->params.linkpoll_period) / 10 :
1311 	    adap->params.stats_update_period * HZ;
1312 	if (timeo)
1313 		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1314 }
1315 
1316 static int offload_open(struct net_device *dev)
1317 {
1318 	struct port_info *pi = netdev_priv(dev);
1319 	struct adapter *adapter = pi->adapter;
1320 	struct t3cdev *tdev = dev2t3cdev(dev);
1321 	int adap_up = adapter->open_device_map & PORT_MASK;
1322 	int err;
1323 
1324 	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1325 		return 0;
1326 
1327 	if (!adap_up && (err = cxgb_up(adapter)) < 0)
1328 		goto out;
1329 
1330 	t3_tp_set_offload_mode(adapter, 1);
1331 	tdev->lldev = adapter->port[0];
1332 	err = cxgb3_offload_activate(adapter);
1333 	if (err)
1334 		goto out;
1335 
1336 	init_port_mtus(adapter);
1337 	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1338 		     adapter->params.b_wnd,
1339 		     adapter->params.rev == 0 ?
1340 		     adapter->port[0]->mtu : 0xffff);
1341 	init_smt(adapter);
1342 
1343 	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1344 		dev_dbg(&dev->dev, "cannot create sysfs group\n");
1345 
1346 	/* Call back all registered clients */
1347 	cxgb3_add_clients(tdev);
1348 
1349 out:
1350 	/* restore them in case the offload module has changed them */
1351 	if (err) {
1352 		t3_tp_set_offload_mode(adapter, 0);
1353 		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1354 		cxgb3_set_dummy_ops(tdev);
1355 	}
1356 	return err;
1357 }
1358 
1359 static int offload_close(struct t3cdev *tdev)
1360 {
1361 	struct adapter *adapter = tdev2adap(tdev);
1362 	struct t3c_data *td = T3C_DATA(tdev);
1363 
1364 	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1365 		return 0;
1366 
1367 	/* Call back all registered clients */
1368 	cxgb3_remove_clients(tdev);
1369 
1370 	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1371 
1372 	/* Flush work scheduled while releasing TIDs */
1373 	flush_work_sync(&td->tid_release_task);
1374 
1375 	tdev->lldev = NULL;
1376 	cxgb3_set_dummy_ops(tdev);
1377 	t3_tp_set_offload_mode(adapter, 0);
1378 	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1379 
1380 	if (!adapter->open_device_map)
1381 		cxgb_down(adapter, 0);
1382 
1383 	cxgb3_offload_deactivate(adapter);
1384 	return 0;
1385 }
1386 
1387 static int cxgb_open(struct net_device *dev)
1388 {
1389 	struct port_info *pi = netdev_priv(dev);
1390 	struct adapter *adapter = pi->adapter;
1391 	int other_ports = adapter->open_device_map & PORT_MASK;
1392 	int err;
1393 
1394 	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1395 		return err;
1396 
1397 	set_bit(pi->port_id, &adapter->open_device_map);
1398 	if (is_offload(adapter) && !ofld_disable) {
1399 		err = offload_open(dev);
1400 		if (err)
1401 			printk(KERN_WARNING
1402 			       "Could not initialize offload capabilities\n");
1403 	}
1404 
1405 	netif_set_real_num_tx_queues(dev, pi->nqsets);
1406 	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
1407 	if (err)
1408 		return err;
1409 	link_start(dev);
1410 	t3_port_intr_enable(adapter, pi->port_id);
1411 	netif_tx_start_all_queues(dev);
1412 	if (!other_ports)
1413 		schedule_chk_task(adapter);
1414 
1415 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
1416 	return 0;
1417 }
1418 
1419 static int __cxgb_close(struct net_device *dev, int on_wq)
1420 {
1421 	struct port_info *pi = netdev_priv(dev);
1422 	struct adapter *adapter = pi->adapter;
1423 
1424 
1425 	if (!adapter->open_device_map)
1426 		return 0;
1427 
1428 	/* Stop link fault interrupts */
1429 	t3_xgm_intr_disable(adapter, pi->port_id);
1430 	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1431 
1432 	t3_port_intr_disable(adapter, pi->port_id);
1433 	netif_tx_stop_all_queues(dev);
1434 	pi->phy.ops->power_down(&pi->phy, 1);
1435 	netif_carrier_off(dev);
1436 	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1437 
1438 	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
1439 	clear_bit(pi->port_id, &adapter->open_device_map);
1440 	spin_unlock_irq(&adapter->work_lock);
1441 
1442 	if (!(adapter->open_device_map & PORT_MASK))
1443 		cancel_delayed_work_sync(&adapter->adap_check_task);
1444 
1445 	if (!adapter->open_device_map)
1446 		cxgb_down(adapter, on_wq);
1447 
1448 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1449 	return 0;
1450 }
1451 
1452 static int cxgb_close(struct net_device *dev)
1453 {
1454 	return __cxgb_close(dev, 0);
1455 }
1456 
1457 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1458 {
1459 	struct port_info *pi = netdev_priv(dev);
1460 	struct adapter *adapter = pi->adapter;
1461 	struct net_device_stats *ns = &pi->netstats;
1462 	const struct mac_stats *pstats;
1463 
1464 	spin_lock(&adapter->stats_lock);
1465 	pstats = t3_mac_update_stats(&pi->mac);
1466 	spin_unlock(&adapter->stats_lock);
1467 
1468 	ns->tx_bytes = pstats->tx_octets;
1469 	ns->tx_packets = pstats->tx_frames;
1470 	ns->rx_bytes = pstats->rx_octets;
1471 	ns->rx_packets = pstats->rx_frames;
1472 	ns->multicast = pstats->rx_mcast_frames;
1473 
1474 	ns->tx_errors = pstats->tx_underrun;
1475 	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1476 	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1477 	    pstats->rx_fifo_ovfl;
1478 
1479 	/* detailed rx_errors */
1480 	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1481 	ns->rx_over_errors = 0;
1482 	ns->rx_crc_errors = pstats->rx_fcs_errs;
1483 	ns->rx_frame_errors = pstats->rx_symbol_errs;
1484 	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1485 	ns->rx_missed_errors = pstats->rx_cong_drops;
1486 
1487 	/* detailed tx_errors */
1488 	ns->tx_aborted_errors = 0;
1489 	ns->tx_carrier_errors = 0;
1490 	ns->tx_fifo_errors = pstats->tx_underrun;
1491 	ns->tx_heartbeat_errors = 0;
1492 	ns->tx_window_errors = 0;
1493 	return ns;
1494 }
1495 
1496 static u32 get_msglevel(struct net_device *dev)
1497 {
1498 	struct port_info *pi = netdev_priv(dev);
1499 	struct adapter *adapter = pi->adapter;
1500 
1501 	return adapter->msg_enable;
1502 }
1503 
1504 static void set_msglevel(struct net_device *dev, u32 val)
1505 {
1506 	struct port_info *pi = netdev_priv(dev);
1507 	struct adapter *adapter = pi->adapter;
1508 
1509 	adapter->msg_enable = val;
1510 }
1511 
1512 static char stats_strings[][ETH_GSTRING_LEN] = {
1513 	"TxOctetsOK         ",
1514 	"TxFramesOK         ",
1515 	"TxMulticastFramesOK",
1516 	"TxBroadcastFramesOK",
1517 	"TxPauseFrames      ",
1518 	"TxUnderrun         ",
1519 	"TxExtUnderrun      ",
1520 
1521 	"TxFrames64         ",
1522 	"TxFrames65To127    ",
1523 	"TxFrames128To255   ",
1524 	"TxFrames256To511   ",
1525 	"TxFrames512To1023  ",
1526 	"TxFrames1024To1518 ",
1527 	"TxFrames1519ToMax  ",
1528 
1529 	"RxOctetsOK         ",
1530 	"RxFramesOK         ",
1531 	"RxMulticastFramesOK",
1532 	"RxBroadcastFramesOK",
1533 	"RxPauseFrames      ",
1534 	"RxFCSErrors        ",
1535 	"RxSymbolErrors     ",
1536 	"RxShortErrors      ",
1537 	"RxJabberErrors     ",
1538 	"RxLengthErrors     ",
1539 	"RxFIFOoverflow     ",
1540 
1541 	"RxFrames64         ",
1542 	"RxFrames65To127    ",
1543 	"RxFrames128To255   ",
1544 	"RxFrames256To511   ",
1545 	"RxFrames512To1023  ",
1546 	"RxFrames1024To1518 ",
1547 	"RxFrames1519ToMax  ",
1548 
1549 	"PhyFIFOErrors      ",
1550 	"TSO                ",
1551 	"VLANextractions    ",
1552 	"VLANinsertions     ",
1553 	"TxCsumOffload      ",
1554 	"RxCsumGood         ",
1555 	"LroAggregated      ",
1556 	"LroFlushed         ",
1557 	"LroNoDesc          ",
1558 	"RxDrops            ",
1559 
1560 	"CheckTXEnToggled   ",
1561 	"CheckResets        ",
1562 
1563 	"LinkFaults         ",
1564 };
1565 
1566 static int get_sset_count(struct net_device *dev, int sset)
1567 {
1568 	switch (sset) {
1569 	case ETH_SS_STATS:
1570 		return ARRAY_SIZE(stats_strings);
1571 	default:
1572 		return -EOPNOTSUPP;
1573 	}
1574 }
1575 
1576 #define T3_REGMAP_SIZE (3 * 1024)
1577 
1578 static int get_regs_len(struct net_device *dev)
1579 {
1580 	return T3_REGMAP_SIZE;
1581 }
1582 
1583 static int get_eeprom_len(struct net_device *dev)
1584 {
1585 	return EEPROMSIZE;
1586 }
1587 
1588 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1589 {
1590 	struct port_info *pi = netdev_priv(dev);
1591 	struct adapter *adapter = pi->adapter;
1592 	u32 fw_vers = 0;
1593 	u32 tp_vers = 0;
1594 
1595 	spin_lock(&adapter->stats_lock);
1596 	t3_get_fw_version(adapter, &fw_vers);
1597 	t3_get_tp_version(adapter, &tp_vers);
1598 	spin_unlock(&adapter->stats_lock);
1599 
1600 	strcpy(info->driver, DRV_NAME);
1601 	strcpy(info->version, DRV_VERSION);
1602 	strcpy(info->bus_info, pci_name(adapter->pdev));
1603 	if (!fw_vers)
1604 		strcpy(info->fw_version, "N/A");
1605 	else {
1606 		snprintf(info->fw_version, sizeof(info->fw_version),
1607 			 "%s %u.%u.%u TP %u.%u.%u",
1608 			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1609 			 G_FW_VERSION_MAJOR(fw_vers),
1610 			 G_FW_VERSION_MINOR(fw_vers),
1611 			 G_FW_VERSION_MICRO(fw_vers),
1612 			 G_TP_VERSION_MAJOR(tp_vers),
1613 			 G_TP_VERSION_MINOR(tp_vers),
1614 			 G_TP_VERSION_MICRO(tp_vers));
1615 	}
1616 }
1617 
1618 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1619 {
1620 	if (stringset == ETH_SS_STATS)
1621 		memcpy(data, stats_strings, sizeof(stats_strings));
1622 }
1623 
1624 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1625 					    struct port_info *p, int idx)
1626 {
1627 	int i;
1628 	unsigned long tot = 0;
1629 
1630 	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1631 		tot += adapter->sge.qs[i].port_stats[idx];
1632 	return tot;
1633 }
1634 
1635 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1636 		      u64 *data)
1637 {
1638 	struct port_info *pi = netdev_priv(dev);
1639 	struct adapter *adapter = pi->adapter;
1640 	const struct mac_stats *s;
1641 
1642 	spin_lock(&adapter->stats_lock);
1643 	s = t3_mac_update_stats(&pi->mac);
1644 	spin_unlock(&adapter->stats_lock);
1645 
1646 	*data++ = s->tx_octets;
1647 	*data++ = s->tx_frames;
1648 	*data++ = s->tx_mcast_frames;
1649 	*data++ = s->tx_bcast_frames;
1650 	*data++ = s->tx_pause;
1651 	*data++ = s->tx_underrun;
1652 	*data++ = s->tx_fifo_urun;
1653 
1654 	*data++ = s->tx_frames_64;
1655 	*data++ = s->tx_frames_65_127;
1656 	*data++ = s->tx_frames_128_255;
1657 	*data++ = s->tx_frames_256_511;
1658 	*data++ = s->tx_frames_512_1023;
1659 	*data++ = s->tx_frames_1024_1518;
1660 	*data++ = s->tx_frames_1519_max;
1661 
1662 	*data++ = s->rx_octets;
1663 	*data++ = s->rx_frames;
1664 	*data++ = s->rx_mcast_frames;
1665 	*data++ = s->rx_bcast_frames;
1666 	*data++ = s->rx_pause;
1667 	*data++ = s->rx_fcs_errs;
1668 	*data++ = s->rx_symbol_errs;
1669 	*data++ = s->rx_short;
1670 	*data++ = s->rx_jabber;
1671 	*data++ = s->rx_too_long;
1672 	*data++ = s->rx_fifo_ovfl;
1673 
1674 	*data++ = s->rx_frames_64;
1675 	*data++ = s->rx_frames_65_127;
1676 	*data++ = s->rx_frames_128_255;
1677 	*data++ = s->rx_frames_256_511;
1678 	*data++ = s->rx_frames_512_1023;
1679 	*data++ = s->rx_frames_1024_1518;
1680 	*data++ = s->rx_frames_1519_max;
1681 
1682 	*data++ = pi->phy.fifo_errors;
1683 
1684 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1685 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1686 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1687 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1688 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1689 	*data++ = 0;
1690 	*data++ = 0;
1691 	*data++ = 0;
1692 	*data++ = s->rx_cong_drops;
1693 
1694 	*data++ = s->num_toggled;
1695 	*data++ = s->num_resets;
1696 
1697 	*data++ = s->link_faults;
1698 }
1699 
1700 static inline void reg_block_dump(struct adapter *ap, void *buf,
1701 				  unsigned int start, unsigned int end)
1702 {
1703 	u32 *p = buf + start;
1704 
1705 	for (; start <= end; start += sizeof(u32))
1706 		*p++ = t3_read_reg(ap, start);
1707 }
1708 
1709 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1710 		     void *buf)
1711 {
1712 	struct port_info *pi = netdev_priv(dev);
1713 	struct adapter *ap = pi->adapter;
1714 
1715 	/*
1716 	 * Version scheme:
1717 	 * bits 0..9: chip version
1718 	 * bits 10..15: chip revision
1719 	 * bit 31: set for PCIe cards
1720 	 */
1721 	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1722 
1723 	/*
1724 	 * We skip the MAC statistics registers because they are clear-on-read.
1725 	 * Also reading multi-register stats would need to synchronize with the
1726 	 * periodic mac stats accumulation.  Hard to justify the complexity.
1727 	 */
1728 	memset(buf, 0, T3_REGMAP_SIZE);
1729 	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1730 	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1731 	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1732 	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1733 	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1734 	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1735 		       XGM_REG(A_XGM_SERDES_STAT3, 1));
1736 	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1737 		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1738 }
1739 
1740 static int restart_autoneg(struct net_device *dev)
1741 {
1742 	struct port_info *p = netdev_priv(dev);
1743 
1744 	if (!netif_running(dev))
1745 		return -EAGAIN;
1746 	if (p->link_config.autoneg != AUTONEG_ENABLE)
1747 		return -EINVAL;
1748 	p->phy.ops->autoneg_restart(&p->phy);
1749 	return 0;
1750 }
1751 
1752 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1753 {
1754 	struct port_info *pi = netdev_priv(dev);
1755 	struct adapter *adapter = pi->adapter;
1756 	int i;
1757 
1758 	if (data == 0)
1759 		data = 2;
1760 
1761 	for (i = 0; i < data * 2; i++) {
1762 		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1763 				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1764 		if (msleep_interruptible(500))
1765 			break;
1766 	}
1767 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1768 			 F_GPIO0_OUT_VAL);
1769 	return 0;
1770 }
1771 
1772 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1773 {
1774 	struct port_info *p = netdev_priv(dev);
1775 
1776 	cmd->supported = p->link_config.supported;
1777 	cmd->advertising = p->link_config.advertising;
1778 
1779 	if (netif_carrier_ok(dev)) {
1780 		cmd->speed = p->link_config.speed;
1781 		cmd->duplex = p->link_config.duplex;
1782 	} else {
1783 		cmd->speed = -1;
1784 		cmd->duplex = -1;
1785 	}
1786 
1787 	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1788 	cmd->phy_address = p->phy.mdio.prtad;
1789 	cmd->transceiver = XCVR_EXTERNAL;
1790 	cmd->autoneg = p->link_config.autoneg;
1791 	cmd->maxtxpkt = 0;
1792 	cmd->maxrxpkt = 0;
1793 	return 0;
1794 }
1795 
1796 static int speed_duplex_to_caps(int speed, int duplex)
1797 {
1798 	int cap = 0;
1799 
1800 	switch (speed) {
1801 	case SPEED_10:
1802 		if (duplex == DUPLEX_FULL)
1803 			cap = SUPPORTED_10baseT_Full;
1804 		else
1805 			cap = SUPPORTED_10baseT_Half;
1806 		break;
1807 	case SPEED_100:
1808 		if (duplex == DUPLEX_FULL)
1809 			cap = SUPPORTED_100baseT_Full;
1810 		else
1811 			cap = SUPPORTED_100baseT_Half;
1812 		break;
1813 	case SPEED_1000:
1814 		if (duplex == DUPLEX_FULL)
1815 			cap = SUPPORTED_1000baseT_Full;
1816 		else
1817 			cap = SUPPORTED_1000baseT_Half;
1818 		break;
1819 	case SPEED_10000:
1820 		if (duplex == DUPLEX_FULL)
1821 			cap = SUPPORTED_10000baseT_Full;
1822 	}
1823 	return cap;
1824 }
1825 
1826 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1827 		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1828 		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1829 		      ADVERTISED_10000baseT_Full)
1830 
1831 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1832 {
1833 	struct port_info *p = netdev_priv(dev);
1834 	struct link_config *lc = &p->link_config;
1835 
1836 	if (!(lc->supported & SUPPORTED_Autoneg)) {
1837 		/*
1838 		 * PHY offers a single speed/duplex.  See if that's what's
1839 		 * being requested.
1840 		 */
1841 		if (cmd->autoneg == AUTONEG_DISABLE) {
1842 			int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1843 			if (lc->supported & cap)
1844 				return 0;
1845 		}
1846 		return -EINVAL;
1847 	}
1848 
1849 	if (cmd->autoneg == AUTONEG_DISABLE) {
1850 		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1851 
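		/* Forcing 1000Mb/s is rejected: 1000BASE-T requires
		 * autonegotiation.
		 */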
1852 		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1853 			return -EINVAL;
1854 		lc->requested_speed = cmd->speed;
1855 		lc->requested_duplex = cmd->duplex;
1856 		lc->advertising = 0;
1857 	} else {
1858 		cmd->advertising &= ADVERTISED_MASK;
1859 		cmd->advertising &= lc->supported;
1860 		if (!cmd->advertising)
1861 			return -EINVAL;
1862 		lc->requested_speed = SPEED_INVALID;
1863 		lc->requested_duplex = DUPLEX_INVALID;
1864 		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1865 	}
1866 	lc->autoneg = cmd->autoneg;
1867 	if (netif_running(dev))
1868 		t3_link_start(&p->phy, &p->mac, lc);
1869 	return 0;
1870 }
1871 
1872 static void get_pauseparam(struct net_device *dev,
1873 			   struct ethtool_pauseparam *epause)
1874 {
1875 	struct port_info *p = netdev_priv(dev);
1876 
1877 	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1878 	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1879 	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1880 }
1881 
1882 static int set_pauseparam(struct net_device *dev,
1883 			  struct ethtool_pauseparam *epause)
1884 {
1885 	struct port_info *p = netdev_priv(dev);
1886 	struct link_config *lc = &p->link_config;
1887 
1888 	if (epause->autoneg == AUTONEG_DISABLE)
1889 		lc->requested_fc = 0;
1890 	else if (lc->supported & SUPPORTED_Autoneg)
1891 		lc->requested_fc = PAUSE_AUTONEG;
1892 	else
1893 		return -EINVAL;
1894 
1895 	if (epause->rx_pause)
1896 		lc->requested_fc |= PAUSE_RX;
1897 	if (epause->tx_pause)
1898 		lc->requested_fc |= PAUSE_TX;
1899 	if (lc->autoneg == AUTONEG_ENABLE) {
1900 		if (netif_running(dev))
1901 			t3_link_start(&p->phy, &p->mac, lc);
1902 	} else {
1903 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1904 		if (netif_running(dev))
1905 			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1906 	}
1907 	return 0;
1908 }
1909 
1910 static u32 get_rx_csum(struct net_device *dev)
1911 {
1912 	struct port_info *p = netdev_priv(dev);
1913 
1914 	return p->rx_offload & T3_RX_CSUM;
1915 }
1916 
1917 static int set_rx_csum(struct net_device *dev, u32 data)
1918 {
1919 	struct port_info *p = netdev_priv(dev);
1920 
1921 	if (data) {
1922 		p->rx_offload |= T3_RX_CSUM;
1923 	} else {
1924 		int i;
1925 
1926 		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1927 		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1928 			set_qset_lro(dev, i, 0);
1929 	}
1930 	return 0;
1931 }
1932 
1933 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1934 {
1935 	struct port_info *pi = netdev_priv(dev);
1936 	struct adapter *adapter = pi->adapter;
1937 	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1938 
1939 	e->rx_max_pending = MAX_RX_BUFFERS;
1940 	e->rx_mini_max_pending = 0;
1941 	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1942 	e->tx_max_pending = MAX_TXQ_ENTRIES;
1943 
1944 	e->rx_pending = q->fl_size;
1945 	e->rx_mini_pending = q->rspq_size;
1946 	e->rx_jumbo_pending = q->jumbo_size;
1947 	e->tx_pending = q->txq_size[0];
1948 }
1949 
1950 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1951 {
1952 	struct port_info *pi = netdev_priv(dev);
1953 	struct adapter *adapter = pi->adapter;
1954 	struct qset_params *q;
1955 	int i;
1956 
1957 	if (e->rx_pending > MAX_RX_BUFFERS ||
1958 	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1959 	    e->tx_pending > MAX_TXQ_ENTRIES ||
1960 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1961 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1962 	    e->rx_pending < MIN_FL_ENTRIES ||
1963 	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1964 	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1965 		return -EINVAL;
1966 
1967 	if (adapter->flags & FULL_INIT_DONE)
1968 		return -EBUSY;
1969 
1970 	q = &adapter->params.sge.qset[pi->first_qset];
1971 	for (i = 0; i < pi->nqsets; ++i, ++q) {
1972 		q->rspq_size = e->rx_mini_pending;
1973 		q->fl_size = e->rx_pending;
1974 		q->jumbo_size = e->rx_jumbo_pending;
1975 		q->txq_size[0] = e->tx_pending;
1976 		q->txq_size[1] = e->tx_pending;
1977 		q->txq_size[2] = e->tx_pending;
1978 	}
1979 	return 0;
1980 }
1981 
1982 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1983 {
1984 	struct port_info *pi = netdev_priv(dev);
1985 	struct adapter *adapter = pi->adapter;
1986 	struct qset_params *qsp;
1987 	struct sge_qset *qs;
1988 	int i;
1989 
1990 	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1991 		return -EINVAL;
1992 
1993 	for (i = 0; i < pi->nqsets; i++) {
1994 		qsp = &adapter->params.sge.qset[i];
1995 		qs = &adapter->sge.qs[i];
1996 		qsp->coalesce_usecs = c->rx_coalesce_usecs;
1997 		t3_update_qset_coalesce(qs, qsp);
1998 	}
1999 
2000 	return 0;
2001 }
2002 
2003 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2004 {
2005 	struct port_info *pi = netdev_priv(dev);
2006 	struct adapter *adapter = pi->adapter;
2007 	struct qset_params *q = adapter->params.sge.qset;
2008 
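	/* All queue sets are normally programmed with the same interrupt
	 * holdoff, so the first queue set is representative.
	 */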
2009 	c->rx_coalesce_usecs = q->coalesce_usecs;
2010 	return 0;
2011 }
2012 
2013 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2014 		      u8 * data)
2015 {
2016 	struct port_info *pi = netdev_priv(dev);
2017 	struct adapter *adapter = pi->adapter;
2018 	int i, err = 0;
2019 
2020 	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2021 	if (!buf)
2022 		return -ENOMEM;
2023 
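	/* EEPROM reads are done a 32-bit word at a time, so start at the word
	 * containing e->offset and copy the requested window out of the
	 * bounce buffer.
	 */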
2024 	e->magic = EEPROM_MAGIC;
2025 	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2026 		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
2027 
2028 	if (!err)
2029 		memcpy(data, buf + e->offset, e->len);
2030 	kfree(buf);
2031 	return err;
2032 }
2033 
2034 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2035 		      u8 * data)
2036 {
2037 	struct port_info *pi = netdev_priv(dev);
2038 	struct adapter *adapter = pi->adapter;
2039 	u32 aligned_offset, aligned_len;
2040 	__le32 *p;
2041 	u8 *buf;
2042 	int err;
2043 
2044 	if (eeprom->magic != EEPROM_MAGIC)
2045 		return -EINVAL;
2046 
2047 	aligned_offset = eeprom->offset & ~3;
2048 	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2049 
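	/* For writes that are not 32-bit aligned, read back the first and
	 * last words and merge in the user data so untouched bytes are
	 * preserved.
	 */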
2050 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2051 		buf = kmalloc(aligned_len, GFP_KERNEL);
2052 		if (!buf)
2053 			return -ENOMEM;
2054 		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2055 		if (!err && aligned_len > 4)
2056 			err = t3_seeprom_read(adapter,
2057 					      aligned_offset + aligned_len - 4,
2058 					      (__le32 *) & buf[aligned_len - 4]);
2059 		if (err)
2060 			goto out;
2061 		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2062 	} else
2063 		buf = data;
2064 
2065 	err = t3_seeprom_wp(adapter, 0);
2066 	if (err)
2067 		goto out;
2068 
2069 	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2070 		err = t3_seeprom_write(adapter, aligned_offset, *p);
2071 		aligned_offset += 4;
2072 	}
2073 
2074 	if (!err)
2075 		err = t3_seeprom_wp(adapter, 1);
2076 out:
2077 	if (buf != data)
2078 		kfree(buf);
2079 	return err;
2080 }
2081 
2082 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2083 {
2084 	wol->supported = 0;
2085 	wol->wolopts = 0;
2086 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2087 }
2088 
2089 static const struct ethtool_ops cxgb_ethtool_ops = {
2090 	.get_settings = get_settings,
2091 	.set_settings = set_settings,
2092 	.get_drvinfo = get_drvinfo,
2093 	.get_msglevel = get_msglevel,
2094 	.set_msglevel = set_msglevel,
2095 	.get_ringparam = get_sge_param,
2096 	.set_ringparam = set_sge_param,
2097 	.get_coalesce = get_coalesce,
2098 	.set_coalesce = set_coalesce,
2099 	.get_eeprom_len = get_eeprom_len,
2100 	.get_eeprom = get_eeprom,
2101 	.set_eeprom = set_eeprom,
2102 	.get_pauseparam = get_pauseparam,
2103 	.set_pauseparam = set_pauseparam,
2104 	.get_rx_csum = get_rx_csum,
2105 	.set_rx_csum = set_rx_csum,
2106 	.set_tx_csum = ethtool_op_set_tx_csum,
2107 	.set_sg = ethtool_op_set_sg,
2108 	.get_link = ethtool_op_get_link,
2109 	.get_strings = get_strings,
2110 	.phys_id = cxgb3_phys_id,
2111 	.nway_reset = restart_autoneg,
2112 	.get_sset_count = get_sset_count,
2113 	.get_ethtool_stats = get_stats,
2114 	.get_regs_len = get_regs_len,
2115 	.get_regs = get_regs,
2116 	.get_wol = get_wol,
2117 	.set_tso = ethtool_op_set_tso,
2118 };
2119 
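/* A negative value means "leave this parameter unchanged" and is always
 * accepted.
 */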
2120 static int in_range(int val, int lo, int hi)
2121 {
2122 	return val < 0 || (val <= hi && val >= lo);
2123 }
2124 
2125 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2126 {
2127 	struct port_info *pi = netdev_priv(dev);
2128 	struct adapter *adapter = pi->adapter;
2129 	u32 cmd;
2130 	int ret;
2131 
2132 	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2133 		return -EFAULT;
2134 
2135 	switch (cmd) {
2136 	case CHELSIO_SET_QSET_PARAMS:{
2137 		int i;
2138 		struct qset_params *q;
2139 		struct ch_qset_params t;
2140 		int q1 = pi->first_qset;
2141 		int nqsets = pi->nqsets;
2142 
2143 		if (!capable(CAP_NET_ADMIN))
2144 			return -EPERM;
2145 		if (copy_from_user(&t, useraddr, sizeof(t)))
2146 			return -EFAULT;
2147 		if (t.qset_idx >= SGE_QSETS)
2148 			return -EINVAL;
2149 		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2150 		    !in_range(t.cong_thres, 0, 255) ||
2151 		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2152 			      MAX_TXQ_ENTRIES) ||
2153 		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2154 			      MAX_TXQ_ENTRIES) ||
2155 		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2156 			      MAX_CTRL_TXQ_ENTRIES) ||
2157 		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2158 			      MAX_RX_BUFFERS) ||
2159 		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2160 			      MAX_RX_JUMBO_BUFFERS) ||
2161 		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2162 			      MAX_RSPQ_ENTRIES))
2163 			return -EINVAL;
2164 
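		/* LRO depends on Rx checksum offload, so refuse to enable it
		 * on a queue set whose port has Rx checksums disabled.
		 */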
2165 		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
2166 			for_each_port(adapter, i) {
2167 				pi = adap2pinfo(adapter, i);
2168 				if (t.qset_idx >= pi->first_qset &&
2169 				    t.qset_idx < pi->first_qset + pi->nqsets &&
2170 				    !(pi->rx_offload & T3_RX_CSUM))
2171 					return -EINVAL;
2172 			}
2173 
2174 		if ((adapter->flags & FULL_INIT_DONE) &&
2175 			(t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2176 			t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2177 			t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2178 			t.polling >= 0 || t.cong_thres >= 0))
2179 			return -EBUSY;
2180 
2181 		/* Allow setting of any available qset when offload enabled */
2182 		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2183 			q1 = 0;
2184 			for_each_port(adapter, i) {
2185 				pi = adap2pinfo(adapter, i);
2186 				nqsets += pi->first_qset + pi->nqsets;
2187 			}
2188 		}
2189 
2190 		if (t.qset_idx < q1)
2191 			return -EINVAL;
2192 		if (t.qset_idx > q1 + nqsets - 1)
2193 			return -EINVAL;
2194 
2195 		q = &adapter->params.sge.qset[t.qset_idx];
2196 
2197 		if (t.rspq_size >= 0)
2198 			q->rspq_size = t.rspq_size;
2199 		if (t.fl_size[0] >= 0)
2200 			q->fl_size = t.fl_size[0];
2201 		if (t.fl_size[1] >= 0)
2202 			q->jumbo_size = t.fl_size[1];
2203 		if (t.txq_size[0] >= 0)
2204 			q->txq_size[0] = t.txq_size[0];
2205 		if (t.txq_size[1] >= 0)
2206 			q->txq_size[1] = t.txq_size[1];
2207 		if (t.txq_size[2] >= 0)
2208 			q->txq_size[2] = t.txq_size[2];
2209 		if (t.cong_thres >= 0)
2210 			q->cong_thres = t.cong_thres;
2211 		if (t.intr_lat >= 0) {
2212 			struct sge_qset *qs =
2213 				&adapter->sge.qs[t.qset_idx];
2214 
2215 			q->coalesce_usecs = t.intr_lat;
2216 			t3_update_qset_coalesce(qs, q);
2217 		}
2218 		if (t.polling >= 0) {
2219 			if (adapter->flags & USING_MSIX)
2220 				q->polling = t.polling;
2221 			else {
2222 				/* No polling with INTx for T3A */
2223 				if (adapter->params.rev == 0 &&
2224 					!(adapter->flags & USING_MSI))
2225 					t.polling = 0;
2226 
2227 				for (i = 0; i < SGE_QSETS; i++) {
2228 					q = &adapter->params.sge.
2229 						qset[i];
2230 					q->polling = t.polling;
2231 				}
2232 			}
2233 		}
2234 		if (t.lro >= 0)
2235 			set_qset_lro(dev, t.qset_idx, t.lro);
2236 
2237 		break;
2238 	}
2239 	case CHELSIO_GET_QSET_PARAMS:{
2240 		struct qset_params *q;
2241 		struct ch_qset_params t;
2242 		int q1 = pi->first_qset;
2243 		int nqsets = pi->nqsets;
2244 		int i;
2245 
2246 		if (copy_from_user(&t, useraddr, sizeof(t)))
2247 			return -EFAULT;
2248 
2249 		/* Display qsets for all ports when offload enabled */
2250 		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2251 			q1 = 0;
2252 			for_each_port(adapter, i) {
2253 				pi = adap2pinfo(adapter, i);
2254 				nqsets = pi->first_qset + pi->nqsets;
2255 			}
2256 		}
2257 
2258 		if (t.qset_idx >= nqsets)
2259 			return -EINVAL;
2260 
2261 		q = &adapter->params.sge.qset[q1 + t.qset_idx];
2262 		t.rspq_size = q->rspq_size;
2263 		t.txq_size[0] = q->txq_size[0];
2264 		t.txq_size[1] = q->txq_size[1];
2265 		t.txq_size[2] = q->txq_size[2];
2266 		t.fl_size[0] = q->fl_size;
2267 		t.fl_size[1] = q->jumbo_size;
2268 		t.polling = q->polling;
2269 		t.lro = q->lro;
2270 		t.intr_lat = q->coalesce_usecs;
2271 		t.cong_thres = q->cong_thres;
2272 		t.qnum = q1;
2273 
2274 		if (adapter->flags & USING_MSIX)
2275 			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2276 		else
2277 			t.vector = adapter->pdev->irq;
2278 
2279 		if (copy_to_user(useraddr, &t, sizeof(t)))
2280 			return -EFAULT;
2281 		break;
2282 	}
2283 	case CHELSIO_SET_QSET_NUM:{
2284 		struct ch_reg edata;
2285 		unsigned int i, first_qset = 0, other_qsets = 0;
2286 
2287 		if (!capable(CAP_NET_ADMIN))
2288 			return -EPERM;
2289 		if (adapter->flags & FULL_INIT_DONE)
2290 			return -EBUSY;
2291 		if (copy_from_user(&edata, useraddr, sizeof(edata)))
2292 			return -EFAULT;
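		/* More than one queue set per port is only supported with
		 * MSI-X.
		 */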
2293 		if (edata.val < 1 ||
2294 			(edata.val > 1 && !(adapter->flags & USING_MSIX)))
2295 			return -EINVAL;
2296 
2297 		for_each_port(adapter, i)
2298 			if (adapter->port[i] && adapter->port[i] != dev)
2299 				other_qsets += adap2pinfo(adapter, i)->nqsets;
2300 
2301 		if (edata.val + other_qsets > SGE_QSETS)
2302 			return -EINVAL;
2303 
2304 		pi->nqsets = edata.val;
2305 
2306 		for_each_port(adapter, i)
2307 			if (adapter->port[i]) {
2308 				pi = adap2pinfo(adapter, i);
2309 				pi->first_qset = first_qset;
2310 				first_qset += pi->nqsets;
2311 			}
2312 		break;
2313 	}
2314 	case CHELSIO_GET_QSET_NUM:{
2315 		struct ch_reg edata;
2316 
2317 		memset(&edata, 0, sizeof(struct ch_reg));
2318 
2319 		edata.cmd = CHELSIO_GET_QSET_NUM;
2320 		edata.val = pi->nqsets;
2321 		if (copy_to_user(useraddr, &edata, sizeof(edata)))
2322 			return -EFAULT;
2323 		break;
2324 	}
2325 	case CHELSIO_LOAD_FW:{
2326 		u8 *fw_data;
2327 		struct ch_mem_range t;
2328 
2329 		if (!capable(CAP_SYS_RAWIO))
2330 			return -EPERM;
2331 		if (copy_from_user(&t, useraddr, sizeof(t)))
2332 			return -EFAULT;
2333 		/* Should t.len be sanity-checked here? */
2334 		fw_data = memdup_user(useraddr + sizeof(t), t.len);
2335 		if (IS_ERR(fw_data))
2336 			return PTR_ERR(fw_data);
2337 
2338 		ret = t3_load_fw(adapter, fw_data, t.len);
2339 		kfree(fw_data);
2340 		if (ret)
2341 			return ret;
2342 		break;
2343 	}
2344 	case CHELSIO_SETMTUTAB:{
2345 		struct ch_mtus m;
2346 		int i;
2347 
2348 		if (!is_offload(adapter))
2349 			return -EOPNOTSUPP;
2350 		if (!capable(CAP_NET_ADMIN))
2351 			return -EPERM;
2352 		if (offload_running(adapter))
2353 			return -EBUSY;
2354 		if (copy_from_user(&m, useraddr, sizeof(m)))
2355 			return -EFAULT;
2356 		if (m.nmtus != NMTUS)
2357 			return -EINVAL;
2358 		if (m.mtus[0] < 81)	/* accommodate SACK */
2359 			return -EINVAL;
2360 
2361 		/* MTUs must be in ascending order */
2362 		for (i = 1; i < NMTUS; ++i)
2363 			if (m.mtus[i] < m.mtus[i - 1])
2364 				return -EINVAL;
2365 
2366 		memcpy(adapter->params.mtus, m.mtus,
2367 			sizeof(adapter->params.mtus));
2368 		break;
2369 	}
2370 	case CHELSIO_GET_PM:{
2371 		struct tp_params *p = &adapter->params.tp;
2372 		struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2373 
2374 		if (!is_offload(adapter))
2375 			return -EOPNOTSUPP;
2376 		m.tx_pg_sz = p->tx_pg_size;
2377 		m.tx_num_pg = p->tx_num_pgs;
2378 		m.rx_pg_sz = p->rx_pg_size;
2379 		m.rx_num_pg = p->rx_num_pgs;
2380 		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2381 		if (copy_to_user(useraddr, &m, sizeof(m)))
2382 			return -EFAULT;
2383 		break;
2384 	}
2385 	case CHELSIO_SET_PM:{
2386 		struct ch_pm m;
2387 		struct tp_params *p = &adapter->params.tp;
2388 
2389 		if (!is_offload(adapter))
2390 			return -EOPNOTSUPP;
2391 		if (!capable(CAP_NET_ADMIN))
2392 			return -EPERM;
2393 		if (adapter->flags & FULL_INIT_DONE)
2394 			return -EBUSY;
2395 		if (copy_from_user(&m, useraddr, sizeof(m)))
2396 			return -EFAULT;
2397 		if (!is_power_of_2(m.rx_pg_sz) ||
2398 			!is_power_of_2(m.tx_pg_sz))
2399 			return -EINVAL;	/* not power of 2 */
2400 		if (!(m.rx_pg_sz & 0x14000))
2401 			return -EINVAL;	/* not 16KB or 64KB */
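		/* the Tx page size must similarly be one of the supported
		 * sizes (16KB..16MB in powers of 4, per the mask below)
		 */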
2402 		if (!(m.tx_pg_sz & 0x1554000))
2403 			return -EINVAL;
2404 		if (m.tx_num_pg == -1)
2405 			m.tx_num_pg = p->tx_num_pgs;
2406 		if (m.rx_num_pg == -1)
2407 			m.rx_num_pg = p->rx_num_pgs;
2408 		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2409 			return -EINVAL;
2410 		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2411 			m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2412 			return -EINVAL;
2413 		p->rx_pg_size = m.rx_pg_sz;
2414 		p->tx_pg_size = m.tx_pg_sz;
2415 		p->rx_num_pgs = m.rx_num_pg;
2416 		p->tx_num_pgs = m.tx_num_pg;
2417 		break;
2418 	}
2419 	case CHELSIO_GET_MEM:{
2420 		struct ch_mem_range t;
2421 		struct mc7 *mem;
2422 		u64 buf[32];
2423 
2424 		if (!is_offload(adapter))
2425 			return -EOPNOTSUPP;
2426 		if (!(adapter->flags & FULL_INIT_DONE))
2427 			return -EIO;	/* need the memory controllers */
2428 		if (copy_from_user(&t, useraddr, sizeof(t)))
2429 			return -EFAULT;
2430 		if ((t.addr & 7) || (t.len & 7))
2431 			return -EINVAL;
2432 		if (t.mem_id == MEM_CM)
2433 			mem = &adapter->cm;
2434 		else if (t.mem_id == MEM_PMRX)
2435 			mem = &adapter->pmrx;
2436 		else if (t.mem_id == MEM_PMTX)
2437 			mem = &adapter->pmtx;
2438 		else
2439 			return -EINVAL;
2440 
2441 		/*
2442 		 * Version scheme:
2443 		 * bits 0..9: chip version
2444 		 * bits 10..15: chip revision
2445 		 */
2446 		t.version = 3 | (adapter->params.rev << 10);
2447 		if (copy_to_user(useraddr, &t, sizeof(t)))
2448 			return -EFAULT;
2449 
2450 		/*
2451 		 * Read 256 bytes at a time as len can be large and we don't
2452 		 * want to use huge intermediate buffers.
2453 		 */
2454 		useraddr += sizeof(t);	/* advance to start of buffer */
2455 		while (t.len) {
2456 			unsigned int chunk =
2457 				min_t(unsigned int, t.len, sizeof(buf));
2458 
2459 			ret =
2460 				t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2461 						buf);
2462 			if (ret)
2463 				return ret;
2464 			if (copy_to_user(useraddr, buf, chunk))
2465 				return -EFAULT;
2466 			useraddr += chunk;
2467 			t.addr += chunk;
2468 			t.len -= chunk;
2469 		}
2470 		break;
2471 	}
2472 	case CHELSIO_SET_TRACE_FILTER:{
2473 		struct ch_trace t;
2474 		const struct trace_params *tp;
2475 
2476 		if (!capable(CAP_NET_ADMIN))
2477 			return -EPERM;
2478 		if (!offload_running(adapter))
2479 			return -EAGAIN;
2480 		if (copy_from_user(&t, useraddr, sizeof(t)))
2481 			return -EFAULT;
2482 
2483 		tp = (const struct trace_params *)&t.sip;
2484 		if (t.config_tx)
2485 			t3_config_trace_filter(adapter, tp, 0,
2486 						t.invert_match,
2487 						t.trace_tx);
2488 		if (t.config_rx)
2489 			t3_config_trace_filter(adapter, tp, 1,
2490 						t.invert_match,
2491 						t.trace_rx);
2492 		break;
2493 	}
2494 	default:
2495 		return -EOPNOTSUPP;
2496 	}
2497 	return 0;
2498 }
2499 
2500 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2501 {
2502 	struct mii_ioctl_data *data = if_mii(req);
2503 	struct port_info *pi = netdev_priv(dev);
2504 	struct adapter *adapter = pi->adapter;
2505 
2506 	switch (cmd) {
2507 	case SIOCGMIIREG:
2508 	case SIOCSMIIREG:
2509 		/* Convert phy_id from older PRTAD/DEVAD format */
2510 		if (is_10G(adapter) &&
2511 		    !mdio_phy_id_is_c45(data->phy_id) &&
2512 		    (data->phy_id & 0x1f00) &&
2513 		    !(data->phy_id & 0xe0e0))
2514 			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2515 						       data->phy_id & 0x1f);
2516 		/* FALLTHRU */
2517 	case SIOCGMIIPHY:
2518 		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2519 	case SIOCCHIOCTL:
2520 		return cxgb_extension_ioctl(dev, req->ifr_data);
2521 	default:
2522 		return -EOPNOTSUPP;
2523 	}
2524 }
2525 
2526 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2527 {
2528 	struct port_info *pi = netdev_priv(dev);
2529 	struct adapter *adapter = pi->adapter;
2530 	int ret;
2531 
2532 	if (new_mtu < 81)	/* accommodate SACK */
2533 		return -EINVAL;
2534 	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2535 		return ret;
2536 	dev->mtu = new_mtu;
2537 	init_port_mtus(adapter);
2538 	if (adapter->params.rev == 0 && offload_running(adapter))
2539 		t3_load_mtus(adapter, adapter->params.mtus,
2540 			     adapter->params.a_wnd, adapter->params.b_wnd,
2541 			     adapter->port[0]->mtu);
2542 	return 0;
2543 }
2544 
2545 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2546 {
2547 	struct port_info *pi = netdev_priv(dev);
2548 	struct adapter *adapter = pi->adapter;
2549 	struct sockaddr *addr = p;
2550 
2551 	if (!is_valid_ether_addr(addr->sa_data))
2552 		return -EINVAL;
2553 
2554 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2555 	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2556 	if (offload_running(adapter))
2557 		write_smt_entry(adapter, pi->port_id);
2558 	return 0;
2559 }
2560 
2561 /**
2562  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2563  * @adap: the adapter
2564  * @p: the port
2565  *
2566  * Ensures that current Rx processing on any of the queues associated with
2567  * the given port completes before returning.  We do this by acquiring and
2568  * releasing the locks of the response queues associated with the port.
2569  */
2570 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2571 {
2572 	int i;
2573 
2574 	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2575 		struct sge_rspq *q = &adap->sge.qs[i].rspq;
2576 
2577 		spin_lock_irq(&q->lock);
2578 		spin_unlock_irq(&q->lock);
2579 	}
2580 }
2581 
2582 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2583 {
2584 	struct port_info *pi = netdev_priv(dev);
2585 	struct adapter *adapter = pi->adapter;
2586 
2587 	pi->vlan_grp = grp;
2588 	if (adapter->params.rev > 0)
2589 		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2590 	else {
2591 		/* single control for all ports */
2592 		unsigned int i, have_vlans = 0;
2593 		for_each_port(adapter, i)
2594 		    have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2595 
2596 		t3_set_vlan_accel(adapter, 1, have_vlans);
2597 	}
2598 	t3_synchronize_rx(adapter, pi);
2599 }
2600 
2601 #ifdef CONFIG_NET_POLL_CONTROLLER
2602 static void cxgb_netpoll(struct net_device *dev)
2603 {
2604 	struct port_info *pi = netdev_priv(dev);
2605 	struct adapter *adapter = pi->adapter;
2606 	int qidx;
2607 
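	/* The MSI-X handlers take the queue set as their cookie, the
	 * INTx/MSI handlers take the adapter.
	 */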
2608 	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2609 		struct sge_qset *qs = &adapter->sge.qs[qidx];
2610 		void *source;
2611 
2612 		if (adapter->flags & USING_MSIX)
2613 			source = qs;
2614 		else
2615 			source = adapter;
2616 
2617 		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2618 	}
2619 }
2620 #endif
2621 
2622 /*
2623  * Periodic accumulation of MAC statistics.
2624  */
2625 static void mac_stats_update(struct adapter *adapter)
2626 {
2627 	int i;
2628 
2629 	for_each_port(adapter, i) {
2630 		struct net_device *dev = adapter->port[i];
2631 		struct port_info *p = netdev_priv(dev);
2632 
2633 		if (netif_running(dev)) {
2634 			spin_lock(&adapter->stats_lock);
2635 			t3_mac_update_stats(&p->mac);
2636 			spin_unlock(&adapter->stats_lock);
2637 		}
2638 	}
2639 }
2640 
2641 static void check_link_status(struct adapter *adapter)
2642 {
2643 	int i;
2644 
2645 	for_each_port(adapter, i) {
2646 		struct net_device *dev = adapter->port[i];
2647 		struct port_info *p = netdev_priv(dev);
2648 		int link_fault;
2649 
2650 		spin_lock_irq(&adapter->work_lock);
2651 		link_fault = p->link_fault;
2652 		spin_unlock_irq(&adapter->work_lock);
2653 
2654 		if (link_fault) {
2655 			t3_link_fault(adapter, i);
2656 			continue;
2657 		}
2658 
2659 		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2660 			t3_xgm_intr_disable(adapter, i);
2661 			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2662 
2663 			t3_link_changed(adapter, i);
2664 			t3_xgm_intr_enable(adapter, i);
2665 		}
2666 	}
2667 }
2668 
2669 static void check_t3b2_mac(struct adapter *adapter)
2670 {
2671 	int i;
2672 
2673 	if (!rtnl_trylock())	/* synchronize with ifdown */
2674 		return;
2675 
2676 	for_each_port(adapter, i) {
2677 		struct net_device *dev = adapter->port[i];
2678 		struct port_info *p = netdev_priv(dev);
2679 		int status;
2680 
2681 		if (!netif_running(dev))
2682 			continue;
2683 
2684 		status = 0;
2685 		if (netif_running(dev) && netif_carrier_ok(dev))
2686 			status = t3b2_mac_watchdog_task(&p->mac);
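		/* 1: the watchdog toggled the MAC, just count it;
		 * 2: the MAC needs a full reset and must be reprogrammed.
		 */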
2687 		if (status == 1)
2688 			p->mac.stats.num_toggled++;
2689 		else if (status == 2) {
2690 			struct cmac *mac = &p->mac;
2691 
2692 			t3_mac_set_mtu(mac, dev->mtu);
2693 			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2694 			cxgb_set_rxmode(dev);
2695 			t3_link_start(&p->phy, mac, &p->link_config);
2696 			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2697 			t3_port_intr_enable(adapter, p->port_id);
2698 			p->mac.stats.num_resets++;
2699 		}
2700 	}
2701 	rtnl_unlock();
2702 }
2703 
2704 
2705 static void t3_adap_check_task(struct work_struct *work)
2706 {
2707 	struct adapter *adapter = container_of(work, struct adapter,
2708 					       adap_check_task.work);
2709 	const struct adapter_params *p = &adapter->params;
2710 	int port;
2711 	unsigned int v, status, reset;
2712 
2713 	adapter->check_task_cnt++;
2714 
2715 	check_link_status(adapter);
2716 
2717 	/* Accumulate MAC stats if needed */
2718 	if (!p->linkpoll_period ||
2719 	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2720 	    p->stats_update_period) {
2721 		mac_stats_update(adapter);
2722 		adapter->check_task_cnt = 0;
2723 	}
2724 
2725 	if (p->rev == T3_REV_B2)
2726 		check_t3b2_mac(adapter);
2727 
2728 	/*
2729 	 * Scan the XGMAC's to check for various conditions which we want to
2730 	 * monitor in a periodic polling manner rather than via an interrupt
2731 	 * condition.  This is used for conditions which would otherwise flood
2732 	 * the system with interrupts and we only really need to know that the
2733 	 * conditions are "happening" ...  For each condition we count the
2734 	 * detection of the condition and reset it for the next polling loop.
2735 	 */
2736 	for_each_port(adapter, port) {
2737 		struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2738 		u32 cause;
2739 
2740 		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2741 		reset = 0;
2742 		if (cause & F_RXFIFO_OVERFLOW) {
2743 			mac->stats.rx_fifo_ovfl++;
2744 			reset |= F_RXFIFO_OVERFLOW;
2745 		}
2746 
2747 		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2748 	}
2749 
2750 	/*
2751 	 * We do the same as above for FL_EMPTY interrupts.
2752 	 */
2753 	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2754 	reset = 0;
2755 
2756 	if (status & F_FLEMPTY) {
2757 		struct sge_qset *qs = &adapter->sge.qs[0];
2758 		int i = 0;
2759 
2760 		reset |= F_FLEMPTY;
2761 
2762 		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2763 		    0xffff;
2764 
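		/* One status bit per free list and two lists per queue set
		 * (FL0/FL1 alternate), so move to the next qset every other
		 * bit.
		 */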
2765 		while (v) {
2766 			qs->fl[i].empty += (v & 1);
2767 			if (i)
2768 				qs++;
2769 			i ^= 1;
2770 			v >>= 1;
2771 		}
2772 	}
2773 
2774 	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2775 
2776 	/* Schedule the next check update if any port is active. */
2777 	spin_lock_irq(&adapter->work_lock);
2778 	if (adapter->open_device_map & PORT_MASK)
2779 		schedule_chk_task(adapter);
2780 	spin_unlock_irq(&adapter->work_lock);
2781 }
2782 
2783 static void db_full_task(struct work_struct *work)
2784 {
2785 	struct adapter *adapter = container_of(work, struct adapter,
2786 					       db_full_task);
2787 
2788 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2789 }
2790 
2791 static void db_empty_task(struct work_struct *work)
2792 {
2793 	struct adapter *adapter = container_of(work, struct adapter,
2794 					       db_empty_task);
2795 
2796 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2797 }
2798 
2799 static void db_drop_task(struct work_struct *work)
2800 {
2801 	struct adapter *adapter = container_of(work, struct adapter,
2802 					       db_drop_task);
2803 	unsigned long delay = 1000;
2804 	unsigned short r;
2805 
2806 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2807 
2808 	/*
2809 	 * Sleep a while before ringing the driver qset dbs.
2810 	 * The delay is between 1000-2023 usecs.
2811 	 */
2812 	get_random_bytes(&r, 2);
2813 	delay += r & 1023;
2814 	set_current_state(TASK_UNINTERRUPTIBLE);
2815 	schedule_timeout(usecs_to_jiffies(delay));
2816 	ring_dbs(adapter);
2817 }
2818 
2819 /*
2820  * Processes external (PHY) interrupts in process context.
2821  */
2822 static void ext_intr_task(struct work_struct *work)
2823 {
2824 	struct adapter *adapter = container_of(work, struct adapter,
2825 					       ext_intr_handler_task);
2826 	int i;
2827 
2828 	/* Disable link fault interrupts */
2829 	for_each_port(adapter, i) {
2830 		struct net_device *dev = adapter->port[i];
2831 		struct port_info *p = netdev_priv(dev);
2832 
2833 		t3_xgm_intr_disable(adapter, i);
2834 		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2835 	}
2836 
2837 	/* Re-enable link fault interrupts */
2838 	t3_phy_intr_handler(adapter);
2839 
2840 	for_each_port(adapter, i)
2841 		t3_xgm_intr_enable(adapter, i);
2842 
2843 	/* Now reenable external interrupts */
2844 	spin_lock_irq(&adapter->work_lock);
2845 	if (adapter->slow_intr_mask) {
2846 		adapter->slow_intr_mask |= F_T3DBG;
2847 		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2848 		t3_write_reg(adapter, A_PL_INT_ENABLE0,
2849 			     adapter->slow_intr_mask);
2850 	}
2851 	spin_unlock_irq(&adapter->work_lock);
2852 }
2853 
2854 /*
2855  * Interrupt-context handler for external (PHY) interrupts.
2856  */
2857 void t3_os_ext_intr_handler(struct adapter *adapter)
2858 {
2859 	/*
2860 	 * Schedule a task to handle external interrupts as they may be slow
2861 	 * and we use a mutex to protect MDIO registers.  We disable PHY
2862 	 * interrupts in the meantime and let the task reenable them when
2863 	 * it's done.
2864 	 */
2865 	spin_lock(&adapter->work_lock);
2866 	if (adapter->slow_intr_mask) {
2867 		adapter->slow_intr_mask &= ~F_T3DBG;
2868 		t3_write_reg(adapter, A_PL_INT_ENABLE0,
2869 			     adapter->slow_intr_mask);
2870 		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2871 	}
2872 	spin_unlock(&adapter->work_lock);
2873 }
2874 
2875 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2876 {
2877 	struct net_device *netdev = adapter->port[port_id];
2878 	struct port_info *pi = netdev_priv(netdev);
2879 
2880 	spin_lock(&adapter->work_lock);
2881 	pi->link_fault = 1;
2882 	spin_unlock(&adapter->work_lock);
2883 }
2884 
2885 static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2886 {
2887 	int i, ret = 0;
2888 
2889 	if (is_offload(adapter) &&
2890 	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2891 		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2892 		offload_close(&adapter->tdev);
2893 	}
2894 
2895 	/* Stop all ports */
2896 	for_each_port(adapter, i) {
2897 		struct net_device *netdev = adapter->port[i];
2898 
2899 		if (netif_running(netdev))
2900 			__cxgb_close(netdev, on_wq);
2901 	}
2902 
2903 	/* Stop SGE timers */
2904 	t3_stop_sge_timers(adapter);
2905 
2906 	adapter->flags &= ~FULL_INIT_DONE;
2907 
2908 	if (reset)
2909 		ret = t3_reset_adapter(adapter);
2910 
2911 	pci_disable_device(adapter->pdev);
2912 
2913 	return ret;
2914 }
2915 
2916 static int t3_reenable_adapter(struct adapter *adapter)
2917 {
2918 	if (pci_enable_device(adapter->pdev)) {
2919 		dev_err(&adapter->pdev->dev,
2920 			"Cannot re-enable PCI device after reset.\n");
2921 		goto err;
2922 	}
2923 	pci_set_master(adapter->pdev);
2924 	pci_restore_state(adapter->pdev);
2925 	pci_save_state(adapter->pdev);
2926 
2927 	/* Free sge resources */
2928 	t3_free_sge_resources(adapter);
2929 
2930 	if (t3_replay_prep_adapter(adapter))
2931 		goto err;
2932 
2933 	return 0;
2934 err:
2935 	return -1;
2936 }
2937 
2938 static void t3_resume_ports(struct adapter *adapter)
2939 {
2940 	int i;
2941 
2942 	/* Restart the ports */
2943 	for_each_port(adapter, i) {
2944 		struct net_device *netdev = adapter->port[i];
2945 
2946 		if (netif_running(netdev)) {
2947 			if (cxgb_open(netdev)) {
2948 				dev_err(&adapter->pdev->dev,
2949 					"can't bring device back up"
2950 					" after reset\n");
2951 				continue;
2952 			}
2953 		}
2954 	}
2955 
2956 	if (is_offload(adapter) && !ofld_disable)
2957 		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2958 }
2959 
2960 /*
2961  * Processes a fatal error.
2962  * Bring the ports down, reset the chip, bring the ports back up.
2963  */
2964 static void fatal_error_task(struct work_struct *work)
2965 {
2966 	struct adapter *adapter = container_of(work, struct adapter,
2967 					       fatal_error_handler_task);
2968 	int err = 0;
2969 
2970 	rtnl_lock();
2971 	err = t3_adapter_error(adapter, 1, 1);
2972 	if (!err)
2973 		err = t3_reenable_adapter(adapter);
2974 	if (!err)
2975 		t3_resume_ports(adapter);
2976 
2977 	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2978 	rtnl_unlock();
2979 }
2980 
2981 void t3_fatal_err(struct adapter *adapter)
2982 {
2983 	unsigned int fw_status[4];
2984 
2985 	if (adapter->flags & FULL_INIT_DONE) {
2986 		t3_sge_stop(adapter);
2987 		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2988 		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2989 		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2990 		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2991 
2992 		spin_lock(&adapter->work_lock);
2993 		t3_intr_disable(adapter);
2994 		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2995 		spin_unlock(&adapter->work_lock);
2996 	}
2997 	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2998 	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2999 		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
3000 			 fw_status[0], fw_status[1],
3001 			 fw_status[2], fw_status[3]);
3002 }
3003 
3004 /**
3005  * t3_io_error_detected - called when PCI error is detected
3006  * @pdev: Pointer to PCI device
3007  * @state: The current pci connection state
3008  *
3009  * This function is called after a PCI bus error affecting
3010  * this device has been detected.
3011  */
3012 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3013 					     pci_channel_state_t state)
3014 {
3015 	struct adapter *adapter = pci_get_drvdata(pdev);
3016 
3017 	if (state == pci_channel_io_perm_failure)
3018 		return PCI_ERS_RESULT_DISCONNECT;
3019 
3020 	t3_adapter_error(adapter, 0, 0);
3021 
3022 	/* Request a slot reset. */
3023 	return PCI_ERS_RESULT_NEED_RESET;
3024 }
3025 
3026 /**
3027  * t3_io_slot_reset - called after the PCI bus has been reset.
3028  * @pdev: Pointer to PCI device
3029  *
3030  * Restart the card from scratch, as if from a cold boot.
3031  */
3032 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3033 {
3034 	struct adapter *adapter = pci_get_drvdata(pdev);
3035 
3036 	if (!t3_reenable_adapter(adapter))
3037 		return PCI_ERS_RESULT_RECOVERED;
3038 
3039 	return PCI_ERS_RESULT_DISCONNECT;
3040 }
3041 
3042 /**
3043  * t3_io_resume - called when traffic can start flowing again.
3044  * @pdev: Pointer to PCI device
3045  *
3046  * This callback is called when the error recovery driver tells us that
3047  * it's OK to resume normal operation.
3048  */
3049 static void t3_io_resume(struct pci_dev *pdev)
3050 {
3051 	struct adapter *adapter = pci_get_drvdata(pdev);
3052 
3053 	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3054 		 t3_read_reg(adapter, A_PCIE_PEX_ERR));
3055 
3056 	t3_resume_ports(adapter);
3057 }
3058 
3059 static struct pci_error_handlers t3_err_handler = {
3060 	.error_detected = t3_io_error_detected,
3061 	.slot_reset = t3_io_slot_reset,
3062 	.resume = t3_io_resume,
3063 };
3064 
3065 /*
3066  * Set the number of qsets based on the number of CPUs and the number of ports,
3067  * not to exceed the number of available qsets, assuming there are enough qsets
3068  * per port in HW.
3069  */
3070 static void set_nqsets(struct adapter *adap)
3071 {
3072 	int i, j = 0;
3073 	int num_cpus = num_online_cpus();
3074 	int hwports = adap->params.nports;
3075 	int nqsets = adap->msix_nvectors - 1;
3076 
3077 	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3078 		if (hwports == 2 &&
3079 		    (hwports * nqsets > SGE_QSETS ||
3080 		     num_cpus >= nqsets / hwports))
3081 			nqsets /= hwports;
3082 		if (nqsets > num_cpus)
3083 			nqsets = num_cpus;
3084 		if (nqsets < 1 || hwports == 4)
3085 			nqsets = 1;
3086 	} else
3087 		nqsets = 1;
3088 
3089 	for_each_port(adap, i) {
3090 		struct port_info *pi = adap2pinfo(adap, i);
3091 
3092 		pi->first_qset = j;
3093 		pi->nqsets = nqsets;
3094 		j = pi->first_qset + nqsets;
3095 
3096 		dev_info(&adap->pdev->dev,
3097 			 "Port %d using %d queue sets.\n", i, nqsets);
3098 	}
3099 }
3100 
3101 static int __devinit cxgb_enable_msix(struct adapter *adap)
3102 {
3103 	struct msix_entry entries[SGE_QSETS + 1];
3104 	int vectors;
3105 	int i, err;
3106 
3107 	vectors = ARRAY_SIZE(entries);
3108 	for (i = 0; i < vectors; ++i)
3109 		entries[i].entry = i;
3110 
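	/* On failure pci_enable_msix() returns the number of vectors that
	 * could have been allocated, so retry with that reduced count.
	 */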
3111 	while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
3112 		vectors = err;
3113 
3114 	if (err < 0)
3115 		pci_disable_msix(adap->pdev);
3116 
3117 	if (!err && vectors < (adap->params.nports + 1)) {
3118 		pci_disable_msix(adap->pdev);
3119 		err = -1;
3120 	}
3121 
3122 	if (!err) {
3123 		for (i = 0; i < vectors; ++i)
3124 			adap->msix_info[i].vec = entries[i].vector;
3125 		adap->msix_nvectors = vectors;
3126 	}
3127 
3128 	return err;
3129 }
3130 
3131 static void __devinit print_port_info(struct adapter *adap,
3132 				      const struct adapter_info *ai)
3133 {
3134 	static const char *pci_variant[] = {
3135 		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3136 	};
3137 
3138 	int i;
3139 	char buf[80];
3140 
3141 	if (is_pcie(adap))
3142 		snprintf(buf, sizeof(buf), "%s x%d",
3143 			 pci_variant[adap->params.pci.variant],
3144 			 adap->params.pci.width);
3145 	else
3146 		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3147 			 pci_variant[adap->params.pci.variant],
3148 			 adap->params.pci.speed, adap->params.pci.width);
3149 
3150 	for_each_port(adap, i) {
3151 		struct net_device *dev = adap->port[i];
3152 		const struct port_info *pi = netdev_priv(dev);
3153 
3154 		if (!test_bit(i, &adap->registered_device_map))
3155 			continue;
3156 		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
3157 		       dev->name, ai->desc, pi->phy.desc,
3158 		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
3159 		       (adap->flags & USING_MSIX) ? " MSI-X" :
3160 		       (adap->flags & USING_MSI) ? " MSI" : "");
3161 		if (adap->name == dev->name && adap->params.vpd.mclk)
3162 			printk(KERN_INFO
3163 			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3164 			       adap->name, t3_mc7_size(&adap->cm) >> 20,
3165 			       t3_mc7_size(&adap->pmtx) >> 20,
3166 			       t3_mc7_size(&adap->pmrx) >> 20,
3167 			       adap->params.vpd.sn);
3168 	}
3169 }
3170 
3171 static const struct net_device_ops cxgb_netdev_ops = {
3172 	.ndo_open		= cxgb_open,
3173 	.ndo_stop		= cxgb_close,
3174 	.ndo_start_xmit		= t3_eth_xmit,
3175 	.ndo_get_stats		= cxgb_get_stats,
3176 	.ndo_validate_addr	= eth_validate_addr,
3177 	.ndo_set_multicast_list	= cxgb_set_rxmode,
3178 	.ndo_do_ioctl		= cxgb_ioctl,
3179 	.ndo_change_mtu		= cxgb_change_mtu,
3180 	.ndo_set_mac_address	= cxgb_set_mac_addr,
3181 	.ndo_vlan_rx_register	= vlan_rx_register,
3182 #ifdef CONFIG_NET_POLL_CONTROLLER
3183 	.ndo_poll_controller	= cxgb_netpoll,
3184 #endif
3185 };
3186 
3187 static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
3188 {
3189 	struct port_info *pi = netdev_priv(dev);
3190 
3191 	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3192 	pi->iscsic.mac_addr[3] |= 0x80;
3193 }
3194 
3195 static int __devinit init_one(struct pci_dev *pdev,
3196 			      const struct pci_device_id *ent)
3197 {
3198 	static int version_printed;
3199 
3200 	int i, err, pci_using_dac = 0;
3201 	resource_size_t mmio_start, mmio_len;
3202 	const struct adapter_info *ai;
3203 	struct adapter *adapter = NULL;
3204 	struct port_info *pi;
3205 
3206 	if (!version_printed) {
3207 		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3208 		++version_printed;
3209 	}
3210 
3211 	if (!cxgb3_wq) {
3212 		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3213 		if (!cxgb3_wq) {
3214 			printk(KERN_ERR DRV_NAME
3215 			       ": cannot initialize work queue\n");
3216 			return -ENOMEM;
3217 		}
3218 	}
3219 
3220 	err = pci_enable_device(pdev);
3221 	if (err) {
3222 		dev_err(&pdev->dev, "cannot enable PCI device\n");
3223 		goto out;
3224 	}
3225 
3226 	err = pci_request_regions(pdev, DRV_NAME);
3227 	if (err) {
3228 		/* Just info, some other driver may have claimed the device. */
3229 		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3230 		goto out_disable_device;
3231 	}
3232 
3233 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3234 		pci_using_dac = 1;
3235 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3236 		if (err) {
3237 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3238 			       "coherent allocations\n");
3239 			goto out_release_regions;
3240 		}
3241 	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3242 		dev_err(&pdev->dev, "no usable DMA configuration\n");
3243 		goto out_release_regions;
3244 	}
3245 
3246 	pci_set_master(pdev);
3247 	pci_save_state(pdev);
3248 
3249 	mmio_start = pci_resource_start(pdev, 0);
3250 	mmio_len = pci_resource_len(pdev, 0);
3251 	ai = t3_get_adapter_info(ent->driver_data);
3252 
3253 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3254 	if (!adapter) {
3255 		err = -ENOMEM;
3256 		goto out_release_regions;
3257 	}
3258 
3259 	adapter->nofail_skb =
3260 		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3261 	if (!adapter->nofail_skb) {
3262 		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3263 		err = -ENOMEM;
3264 		goto out_free_adapter;
3265 	}
3266 
3267 	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3268 	if (!adapter->regs) {
3269 		dev_err(&pdev->dev, "cannot map device registers\n");
3270 		err = -ENOMEM;
3271 		goto out_free_adapter;
3272 	}
3273 
3274 	adapter->pdev = pdev;
3275 	adapter->name = pci_name(pdev);
3276 	adapter->msg_enable = dflt_msg_enable;
3277 	adapter->mmio_len = mmio_len;
3278 
3279 	mutex_init(&adapter->mdio_lock);
3280 	spin_lock_init(&adapter->work_lock);
3281 	spin_lock_init(&adapter->stats_lock);
3282 
3283 	INIT_LIST_HEAD(&adapter->adapter_list);
3284 	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3285 	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3286 
3287 	INIT_WORK(&adapter->db_full_task, db_full_task);
3288 	INIT_WORK(&adapter->db_empty_task, db_empty_task);
3289 	INIT_WORK(&adapter->db_drop_task, db_drop_task);
3290 
3291 	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3292 
3293 	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3294 		struct net_device *netdev;
3295 
3296 		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3297 		if (!netdev) {
3298 			err = -ENOMEM;
3299 			goto out_free_dev;
3300 		}
3301 
3302 		SET_NETDEV_DEV(netdev, &pdev->dev);
3303 
3304 		adapter->port[i] = netdev;
3305 		pi = netdev_priv(netdev);
3306 		pi->adapter = adapter;
3307 		pi->rx_offload = T3_RX_CSUM | T3_LRO;
3308 		pi->port_id = i;
3309 		netif_carrier_off(netdev);
3310 		netdev->irq = pdev->irq;
3311 		netdev->mem_start = mmio_start;
3312 		netdev->mem_end = mmio_start + mmio_len - 1;
3313 		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3314 		netdev->features |= NETIF_F_GRO;
3315 		if (pci_using_dac)
3316 			netdev->features |= NETIF_F_HIGHDMA;
3317 
3318 		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3319 		netdev->netdev_ops = &cxgb_netdev_ops;
3320 		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3321 	}
3322 
3323 	pci_set_drvdata(pdev, adapter);
3324 	if (t3_prep_adapter(adapter, ai, 1) < 0) {
3325 		err = -ENODEV;
3326 		goto out_free_dev;
3327 	}
3328 
3329 	/*
3330 	 * The card is now ready to go.  If any errors occur during device
3331 	 * registration we do not fail the whole card but rather proceed only
3332 	 * with the ports we manage to register successfully.  However we must
3333 	 * register at least one net device.
3334 	 */
3335 	for_each_port(adapter, i) {
3336 		err = register_netdev(adapter->port[i]);
3337 		if (err)
3338 			dev_warn(&pdev->dev,
3339 				 "cannot register net device %s, skipping\n",
3340 				 adapter->port[i]->name);
3341 		else {
3342 			/*
3343 			 * Change the name we use for messages to the name of
3344 			 * the first successfully registered interface.
3345 			 */
3346 			if (!adapter->registered_device_map)
3347 				adapter->name = adapter->port[i]->name;
3348 
3349 			__set_bit(i, &adapter->registered_device_map);
3350 		}
3351 	}
3352 	if (!adapter->registered_device_map) {
3353 		dev_err(&pdev->dev, "could not register any net devices\n");
3354 		goto out_free_dev;
3355 	}
3356 
3357 	for_each_port(adapter, i)
3358 		cxgb3_init_iscsi_mac(adapter->port[i]);
3359 
3360 	/* Driver's ready. Reflect it on LEDs */
3361 	t3_led_ready(adapter);
3362 
3363 	if (is_offload(adapter)) {
3364 		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3365 		cxgb3_adapter_ofld(adapter);
3366 	}
3367 
3368 	/* See what interrupts we'll be using */
3369 	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3370 		adapter->flags |= USING_MSIX;
3371 	else if (msi > 0 && pci_enable_msi(pdev) == 0)
3372 		adapter->flags |= USING_MSI;
3373 
3374 	set_nqsets(adapter);
3375 
3376 	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3377 				 &cxgb3_attr_group);
3378 
3379 	print_port_info(adapter, ai);
3380 	return 0;
3381 
3382 out_free_dev:
3383 	iounmap(adapter->regs);
3384 	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3385 		if (adapter->port[i])
3386 			free_netdev(adapter->port[i]);
3387 
3388 out_free_adapter:
3389 	kfree(adapter);
3390 
3391 out_release_regions:
3392 	pci_release_regions(pdev);
3393 out_disable_device:
3394 	pci_disable_device(pdev);
3395 	pci_set_drvdata(pdev, NULL);
3396 out:
3397 	return err;
3398 }
3399 
3400 static void __devexit remove_one(struct pci_dev *pdev)
3401 {
3402 	struct adapter *adapter = pci_get_drvdata(pdev);
3403 
3404 	if (adapter) {
3405 		int i;
3406 
3407 		t3_sge_stop(adapter);
3408 		sysfs_remove_group(&adapter->port[0]->dev.kobj,
3409 				   &cxgb3_attr_group);
3410 
3411 		if (is_offload(adapter)) {
3412 			cxgb3_adapter_unofld(adapter);
3413 			if (test_bit(OFFLOAD_DEVMAP_BIT,
3414 				     &adapter->open_device_map))
3415 				offload_close(&adapter->tdev);
3416 		}
3417 
3418 		for_each_port(adapter, i)
3419 		    if (test_bit(i, &adapter->registered_device_map))
3420 			unregister_netdev(adapter->port[i]);
3421 
3422 		t3_stop_sge_timers(adapter);
3423 		t3_free_sge_resources(adapter);
3424 		cxgb_disable_msi(adapter);
3425 
3426 		for_each_port(adapter, i)
3427 			if (adapter->port[i])
3428 				free_netdev(adapter->port[i]);
3429 
3430 		iounmap(adapter->regs);
3431 		if (adapter->nofail_skb)
3432 			kfree_skb(adapter->nofail_skb);
3433 		kfree(adapter);
3434 		pci_release_regions(pdev);
3435 		pci_disable_device(pdev);
3436 		pci_set_drvdata(pdev, NULL);
3437 	}
3438 }
3439 
3440 static struct pci_driver driver = {
3441 	.name = DRV_NAME,
3442 	.id_table = cxgb3_pci_tbl,
3443 	.probe = init_one,
3444 	.remove = __devexit_p(remove_one),
3445 	.err_handler = &t3_err_handler,
3446 };
3447 
3448 static int __init cxgb3_init_module(void)
3449 {
3450 	int ret;
3451 
3452 	cxgb3_offload_init();
3453 
3454 	ret = pci_register_driver(&driver);
3455 	return ret;
3456 }
3457 
3458 static void __exit cxgb3_cleanup_module(void)
3459 {
3460 	pci_unregister_driver(&driver);
3461 	if (cxgb3_wq)
3462 		destroy_workqueue(cxgb3_wq);
3463 }
3464 
3465 module_init(cxgb3_init_module);
3466 module_exit(cxgb3_cleanup_module);
3467