1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36 
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if_vlan.h>
45 #include <linux/init.h>
46 #include <linux/log2.h>
47 #include <linux/mdio.h>
48 #include <linux/module.h>
49 #include <linux/moduleparam.h>
50 #include <linux/mutex.h>
51 #include <linux/netdevice.h>
52 #include <linux/pci.h>
53 #include <linux/aer.h>
54 #include <linux/rtnetlink.h>
55 #include <linux/sched.h>
56 #include <linux/seq_file.h>
57 #include <linux/sockios.h>
58 #include <linux/vmalloc.h>
59 #include <linux/workqueue.h>
60 #include <net/neighbour.h>
61 #include <net/netevent.h>
62 #include <asm/uaccess.h>
63 
64 #include "cxgb4.h"
65 #include "t4_regs.h"
66 #include "t4_msg.h"
67 #include "t4fw_api.h"
68 #include "l2t.h"
69 
70 #define DRV_VERSION "1.3.0-ko"
71 #define DRV_DESC "Chelsio T4 Network Driver"
72 
73 /*
74  * Max interrupt hold-off timer value in us.  Queues fall back to this value
75  * under extreme memory pressure so it's largish to give the system time to
76  * recover.
77  */
78 #define MAX_SGE_TIMERVAL 200U
79 
80 #ifdef CONFIG_PCI_IOV
81 /*
82  * Virtual Function provisioning constants.  We need two extra Ingress Queues
83  * with Interrupt capability to serve as the VF's Firmware Event Queue and
84  * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
85  * Lists associated with them.  For each Ethernet/Control Egress Queue and
86  * for each Free List, we need an Egress Context.
87  */
88 enum {
89 	VFRES_NPORTS = 1,		/* # of "ports" per VF */
90 	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */
91 
92 	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
93 	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
94 	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
95 	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
96 	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
97 	VFRES_TC = 0,			/* PCI-E traffic class */
98 	VFRES_NEXACTF = 16,		/* # of exact MPS filters */
99 
100 	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
101 	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
102 };
103 
104 /*
105  * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
106  * static and likely not to be useful in the long run.  We really need to
107  * implement some form of persistent configuration which the firmware
108  * controls.
109  */
110 static unsigned int pfvfres_pmask(struct adapter *adapter,
111 				  unsigned int pf, unsigned int vf)
112 {
113 	unsigned int portn, portvec;
114 
115 	/*
116 	 * Give PF's access to all of the ports.
117 	 */
118 	if (vf == 0)
119 		return FW_PFVF_CMD_PMASK_MASK;
120 
121 	/*
122 	 * For VFs, we'll assign them access to the ports based purely on the
123 	 * PF.  We assign active ports in order, wrapping around if there are
124 	 * fewer active ports than PFs: e.g. active port[pf % nports].
125 	 * Unfortunately the adapter's port_info structs haven't been
126 	 * initialized yet so we have to compute this.
127 	 */
128 	if (adapter->params.nports == 0)
129 		return 0;
130 
131 	portn = pf % adapter->params.nports;
132 	portvec = adapter->params.portvec;
133 	for (;;) {
134 		/*
135 		 * Isolate the lowest set bit in the port vector.  If we're at
136 		 * the port number that we want, return that as the pmask;
137 		 * otherwise mask that bit out of the port vector and
138 		 * decrement our port number ...
139 		 */
140 		unsigned int pmask = portvec ^ (portvec & (portvec-1));
141 		if (portn == 0)
142 			return pmask;
143 		portn--;
144 		portvec &= ~pmask;
145 	}
146 	/*NOTREACHED*/
147 }
148 #endif
149 
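/*
 * Apertures and base addresses of the adapter's memory access windows.
 */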
150 enum {
151 	MEMWIN0_APERTURE = 65536,
152 	MEMWIN0_BASE     = 0x30000,
153 	MEMWIN1_APERTURE = 32768,
154 	MEMWIN1_BASE     = 0x28000,
155 	MEMWIN2_APERTURE = 2048,
156 	MEMWIN2_BASE     = 0x1b800,
157 };
158 
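/*
 * Bounds on the SGE queue sizes that may be requested through ethtool's
 * ring-parameter interface.
 */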
159 enum {
160 	MAX_TXQ_ENTRIES      = 16384,
161 	MAX_CTRL_TXQ_ENTRIES = 1024,
162 	MAX_RSPQ_ENTRIES     = 16384,
163 	MAX_RX_BUFFERS       = 16384,
164 	MIN_TXQ_ENTRIES      = 32,
165 	MIN_CTRL_TXQ_ENTRIES = 32,
166 	MIN_RSPQ_ENTRIES     = 128,
167 	MIN_FL_ENTRIES       = 16
168 };
169 
170 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
171 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
172 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
173 
174 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
175 
176 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
177 	CH_DEVICE(0xa000, 0),  /* PE10K */
178 	CH_DEVICE(0x4001, -1),
179 	CH_DEVICE(0x4002, -1),
180 	CH_DEVICE(0x4003, -1),
181 	CH_DEVICE(0x4004, -1),
182 	CH_DEVICE(0x4005, -1),
183 	CH_DEVICE(0x4006, -1),
184 	CH_DEVICE(0x4007, -1),
185 	CH_DEVICE(0x4008, -1),
186 	CH_DEVICE(0x4009, -1),
187 	CH_DEVICE(0x400a, -1),
188 	CH_DEVICE(0x4401, 4),
189 	CH_DEVICE(0x4402, 4),
190 	CH_DEVICE(0x4403, 4),
191 	CH_DEVICE(0x4404, 4),
192 	CH_DEVICE(0x4405, 4),
193 	CH_DEVICE(0x4406, 4),
194 	CH_DEVICE(0x4407, 4),
195 	CH_DEVICE(0x4408, 4),
196 	CH_DEVICE(0x4409, 4),
197 	CH_DEVICE(0x440a, 4),
198 	{ 0, }
199 };
200 
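/* Firmware image requested via request_firmware() for flash upgrades. */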
201 #define FW_FNAME "cxgb4/t4fw.bin"
202 
203 MODULE_DESCRIPTION(DRV_DESC);
204 MODULE_AUTHOR("Chelsio Communications");
205 MODULE_LICENSE("Dual BSD/GPL");
206 MODULE_VERSION(DRV_VERSION);
207 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
208 MODULE_FIRMWARE(FW_FNAME);
209 
210 static int dflt_msg_enable = DFLT_MSG_ENABLE;
211 
212 module_param(dflt_msg_enable, int, 0644);
213 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
214 
215 /*
216  * The driver uses the best interrupt scheme available on a platform in the
217  * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
218  * of these schemes the driver may consider as follows:
219  *
220  * msi = 2: choose from among all three options
221  * msi = 1: only consider MSI and INTx interrupts
222  * msi = 0: force INTx interrupts
223  */
224 static int msi = 2;
225 
226 module_param(msi, int, 0644);
227 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
228 
229 /*
230  * Queue interrupt hold-off timer values.  Queues default to the first of these
231  * upon creation.
232  */
233 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
234 
235 module_param_array(intr_holdoff, uint, NULL, 0644);
236 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
237 		 "0..4 in microseconds");
238 
239 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
240 
241 module_param_array(intr_cnt, uint, NULL, 0644);
242 MODULE_PARM_DESC(intr_cnt,
243 		 "thresholds 1..3 for queue interrupt packet counters");
244 
245 static int vf_acls;
246 
247 #ifdef CONFIG_PCI_IOV
248 module_param(vf_acls, bool, 0644);
249 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
250 
251 static unsigned int num_vf[4];
252 
253 module_param_array(num_vf, uint, NULL, 0644);
254 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
255 #endif
256 
257 static struct dentry *cxgb4_debugfs_root;
258 
259 static LIST_HEAD(adapter_list);
260 static DEFINE_MUTEX(uld_mutex);
261 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
262 static const char *uld_str[] = { "RDMA", "iSCSI" };
263 
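/* Log a port's current link state: speed, duplex, and pause configuration. */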
264 static void link_report(struct net_device *dev)
265 {
266 	if (!netif_carrier_ok(dev))
267 		netdev_info(dev, "link down\n");
268 	else {
269 		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
270 
271 		const char *s = "10Mbps";
272 		const struct port_info *p = netdev_priv(dev);
273 
274 		switch (p->link_cfg.speed) {
275 		case SPEED_10000:
276 			s = "10Gbps";
277 			break;
278 		case SPEED_1000:
279 			s = "1000Mbps";
280 			break;
281 		case SPEED_100:
282 			s = "100Mbps";
283 			break;
284 		}
285 
286 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
287 			    fc[p->link_cfg.fc]);
288 	}
289 }
290 
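/*
 * Called by common code when a port's link state changes.  Update the netdev
 * carrier state to match and log the transition.
 */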
291 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
292 {
293 	struct net_device *dev = adapter->port[port_id];
294 
295 	/* Skip changes from disabled ports. */
296 	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
297 		if (link_stat)
298 			netif_carrier_on(dev);
299 		else
300 			netif_carrier_off(dev);
301 
302 		link_report(dev);
303 	}
304 }
305 
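/*
 * Called by common code when a port's transceiver module is plugged in or
 * unplugged.
 */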
306 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
307 {
308 	static const char *mod_str[] = {
309 		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
310 	};
311 
312 	const struct net_device *dev = adap->port[port_id];
313 	const struct port_info *pi = netdev_priv(dev);
314 
315 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
316 		netdev_info(dev, "port module unplugged\n");
317 	else if (pi->mod_type < ARRAY_SIZE(mod_str))
318 		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
319 }
320 
321 /*
322  * Configure the exact and hash address filters to handle a port's multicast
323  * and secondary unicast MAC addresses.
324  */
325 static int set_addr_filters(const struct net_device *dev, bool sleep)
326 {
327 	u64 mhash = 0;
328 	u64 uhash = 0;
329 	bool free = true;
330 	u16 filt_idx[7];
331 	const u8 *addr[7];
332 	int ret, naddr = 0;
333 	const struct netdev_hw_addr *ha;
334 	int uc_cnt = netdev_uc_count(dev);
335 	int mc_cnt = netdev_mc_count(dev);
336 	const struct port_info *pi = netdev_priv(dev);
337 	unsigned int mb = pi->adapter->fn;
338 
339 	/* first do the secondary unicast addresses */
340 	netdev_for_each_uc_addr(ha, dev) {
341 		addr[naddr++] = ha->addr;
342 		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
343 			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
344 					naddr, addr, filt_idx, &uhash, sleep);
345 			if (ret < 0)
346 				return ret;
347 
348 			free = false;
349 			naddr = 0;
350 		}
351 	}
352 
353 	/* next set up the multicast addresses */
354 	netdev_for_each_mc_addr(ha, dev) {
355 		addr[naddr++] = ha->addr;
356 		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
357 			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
358 					naddr, addr, filt_idx, &mhash, sleep);
359 			if (ret < 0)
360 				return ret;
361 
362 			free = false;
363 			naddr = 0;
364 		}
365 	}
366 
367 	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
368 				uhash | mhash, sleep);
369 }
370 
371 /*
372  * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
373  * If @mtu is -1 it is left unchanged.
374  */
375 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
376 {
377 	int ret;
378 	struct port_info *pi = netdev_priv(dev);
379 
380 	ret = set_addr_filters(dev, sleep_ok);
381 	if (ret == 0)
382 		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
383 				    (dev->flags & IFF_PROMISC) ? 1 : 0,
384 				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
385 				    sleep_ok);
386 	return ret;
387 }
388 
389 /**
390  *	link_start - enable a port
391  *	@dev: the port to enable
392  *
393  *	Performs the MAC and PHY actions needed to enable a port.
394  */
395 static int link_start(struct net_device *dev)
396 {
397 	int ret;
398 	struct port_info *pi = netdev_priv(dev);
399 	unsigned int mb = pi->adapter->fn;
400 
401 	/*
402 	 * We do not set address filters and promiscuity here, the stack does
403 	 * that step explicitly.
404 	 */
405 	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
406 			    !!(dev->features & NETIF_F_HW_VLAN_RX), true);
407 	if (ret == 0) {
408 		ret = t4_change_mac(pi->adapter, mb, pi->viid,
409 				    pi->xact_addr_filt, dev->dev_addr, true,
410 				    true);
411 		if (ret >= 0) {
412 			pi->xact_addr_filt = ret;
413 			ret = 0;
414 		}
415 	}
416 	if (ret == 0)
417 		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
418 				    &pi->link_cfg);
419 	if (ret == 0)
420 		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
421 	return ret;
422 }
423 
424 /*
425  * Response queue handler for the FW event queue.
426  */
427 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
428 			  const struct pkt_gl *gl)
429 {
430 	u8 opcode = ((const struct rss_header *)rsp)->opcode;
431 
432 	rsp++;                                          /* skip RSS header */
433 	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
434 		const struct cpl_sge_egr_update *p = (void *)rsp;
435 		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
436 		struct sge_txq *txq;
437 
438 		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
439 		txq->restarts++;
440 		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
441 			struct sge_eth_txq *eq;
442 
443 			eq = container_of(txq, struct sge_eth_txq, q);
444 			netif_tx_wake_queue(eq->txq);
445 		} else {
446 			struct sge_ofld_txq *oq;
447 
448 			oq = container_of(txq, struct sge_ofld_txq, q);
449 			tasklet_schedule(&oq->qresume_tsk);
450 		}
451 	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
452 		const struct cpl_fw6_msg *p = (void *)rsp;
453 
454 		if (p->type == 0)
455 			t4_handle_fw_rpl(q->adap, p->data);
456 	} else if (opcode == CPL_L2T_WRITE_RPL) {
457 		const struct cpl_l2t_write_rpl *p = (void *)rsp;
458 
459 		do_l2t_write_rpl(q->adap, p);
460 	} else
461 		dev_err(q->adap->pdev_dev,
462 			"unexpected CPL %#x on FW event queue\n", opcode);
463 	return 0;
464 }
465 
466 /**
467  *	uldrx_handler - response queue handler for ULD queues
468  *	@q: the response queue that received the packet
469  *	@rsp: the response queue descriptor holding the offload message
470  *	@gl: the gather list of packet fragments
471  *
472  *	Deliver an ingress offload packet to a ULD.  All processing is done by
473  *	the ULD, we just maintain statistics.
474  */
475 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
476 			 const struct pkt_gl *gl)
477 {
478 	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
479 
480 	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
481 		rxq->stats.nomem++;
482 		return -1;
483 	}
484 	if (gl == NULL)
485 		rxq->stats.imm++;
486 	else if (gl == CXGB4_MSG_AN)
487 		rxq->stats.an++;
488 	else
489 		rxq->stats.pkts++;
490 	return 0;
491 }
492 
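/* Disable whichever of MSI-X or MSI the adapter is currently using. */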
493 static void disable_msi(struct adapter *adapter)
494 {
495 	if (adapter->flags & USING_MSIX) {
496 		pci_disable_msix(adapter->pdev);
497 		adapter->flags &= ~USING_MSIX;
498 	} else if (adapter->flags & USING_MSI) {
499 		pci_disable_msi(adapter->pdev);
500 		adapter->flags &= ~USING_MSI;
501 	}
502 }
503 
504 /*
505  * Interrupt handler for non-data events used with MSI-X.
506  */
507 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
508 {
509 	struct adapter *adap = cookie;
510 
511 	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
512 	if (v & PFSW) {
513 		adap->swintr = 1;
514 		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
515 	}
516 	t4_slow_intr_handler(adap);
517 	return IRQ_HANDLED;
518 }
519 
520 /*
521  * Name the MSI-X interrupts.
522  */
523 static void name_msix_vecs(struct adapter *adap)
524 {
525 	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
526 
527 	/* non-data interrupts */
528 	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
529 
530 	/* FW events */
531 	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
532 		 adap->port[0]->name);
533 
534 	/* Ethernet queues */
535 	for_each_port(adap, j) {
536 		struct net_device *d = adap->port[j];
537 		const struct port_info *pi = netdev_priv(d);
538 
539 		for (i = 0; i < pi->nqsets; i++, msi_idx++)
540 			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
541 				 d->name, i);
542 	}
543 
544 	/* offload queues */
545 	for_each_ofldrxq(&adap->sge, i)
546 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
547 			 adap->port[0]->name, i);
548 
549 	for_each_rdmarxq(&adap->sge, i)
550 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
551 			 adap->port[0]->name, i);
552 }
553 
554 static int request_msix_queue_irqs(struct adapter *adap)
555 {
556 	struct sge *s = &adap->sge;
557 	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
558 
559 	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
560 			  adap->msix_info[1].desc, &s->fw_evtq);
561 	if (err)
562 		return err;
563 
564 	for_each_ethrxq(s, ethqidx) {
565 		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
566 				  adap->msix_info[msi].desc,
567 				  &s->ethrxq[ethqidx].rspq);
568 		if (err)
569 			goto unwind;
570 		msi++;
571 	}
572 	for_each_ofldrxq(s, ofldqidx) {
573 		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
574 				  adap->msix_info[msi].desc,
575 				  &s->ofldrxq[ofldqidx].rspq);
576 		if (err)
577 			goto unwind;
578 		msi++;
579 	}
580 	for_each_rdmarxq(s, rdmaqidx) {
581 		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
582 				  adap->msix_info[msi].desc,
583 				  &s->rdmarxq[rdmaqidx].rspq);
584 		if (err)
585 			goto unwind;
586 		msi++;
587 	}
588 	return 0;
589 
590 unwind:
591 	while (--rdmaqidx >= 0)
592 		free_irq(adap->msix_info[--msi].vec,
593 			 &s->rdmarxq[rdmaqidx].rspq);
594 	while (--ofldqidx >= 0)
595 		free_irq(adap->msix_info[--msi].vec,
596 			 &s->ofldrxq[ofldqidx].rspq);
597 	while (--ethqidx >= 0)
598 		free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
599 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
600 	return err;
601 }
602 
603 static void free_msix_queue_irqs(struct adapter *adap)
604 {
605 	int i, msi = 2;
606 	struct sge *s = &adap->sge;
607 
608 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
609 	for_each_ethrxq(s, i)
610 		free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
611 	for_each_ofldrxq(s, i)
612 		free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
613 	for_each_rdmarxq(s, i)
614 		free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
615 }
616 
617 /**
618  *	write_rss - write the RSS table for a given port
619  *	@pi: the port
620  *	@queues: array of queue indices for RSS
621  *
622  *	Sets up the portion of the HW RSS table for the port's VI to distribute
623  *	packets to the Rx queues in @queues.
624  */
625 static int write_rss(const struct port_info *pi, const u16 *queues)
626 {
627 	u16 *rss;
628 	int i, err;
629 	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
630 
631 	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
632 	if (!rss)
633 		return -ENOMEM;
634 
635 	/* map the queue indices to queue ids */
636 	for (i = 0; i < pi->rss_size; i++, queues++)
637 		rss[i] = q[*queues].rspq.abs_id;
638 
639 	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
640 				  pi->rss_size, rss, pi->rss_size);
641 	kfree(rss);
642 	return err;
643 }
644 
645 /**
646  *	setup_rss - configure RSS
647  *	@adap: the adapter
648  *
649  *	Sets up RSS for each port.
650  */
651 static int setup_rss(struct adapter *adap)
652 {
653 	int i, err;
654 
655 	for_each_port(adap, i) {
656 		const struct port_info *pi = adap2pinfo(adap, i);
657 
658 		err = write_rss(pi, pi->rss);
659 		if (err)
660 			return err;
661 	}
662 	return 0;
663 }
664 
665 /*
666  * Return the channel of the ingress queue with the given qid.
667  */
668 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
669 {
670 	qid -= p->ingr_start;
671 	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
672 }
673 
674 /*
675  * Wait until all NAPI handlers are descheduled.
676  */
677 static void quiesce_rx(struct adapter *adap)
678 {
679 	int i;
680 
681 	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
682 		struct sge_rspq *q = adap->sge.ingr_map[i];
683 
684 		if (q && q->handler)
685 			napi_disable(&q->napi);
686 	}
687 }
688 
689 /*
690  * Enable NAPI scheduling and interrupt generation for all Rx queues.
691  */
692 static void enable_rx(struct adapter *adap)
693 {
694 	int i;
695 
696 	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
697 		struct sge_rspq *q = adap->sge.ingr_map[i];
698 
699 		if (!q)
700 			continue;
701 		if (q->handler)
702 			napi_enable(&q->napi);
703 		/* 0-increment GTS to start the timer and enable interrupts */
704 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
705 			     SEINTARM(q->intr_params) |
706 			     INGRESSQID(q->cntxt_id));
707 	}
708 }
709 
710 /**
711  *	setup_sge_queues - configure SGE Tx/Rx/response queues
712  *	@adap: the adapter
713  *
714  *	Determines how many sets of SGE queues to use and initializes them.
715  *	We support multiple queue sets per port if we have MSI-X, otherwise
716  *	just one queue set per port.
717  */
718 static int setup_sge_queues(struct adapter *adap)
719 {
720 	int err, msi_idx, i, j;
721 	struct sge *s = &adap->sge;
722 
723 	bitmap_zero(s->starving_fl, MAX_EGRQ);
724 	bitmap_zero(s->txq_maperr, MAX_EGRQ);
725 
726 	if (adap->flags & USING_MSIX)
727 		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
728 	else {
729 		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
730 				       NULL, NULL);
731 		if (err)
732 			return err;
733 		msi_idx = -((int)s->intrq.abs_id + 1);
734 	}
735 
736 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
737 			       msi_idx, NULL, fwevtq_handler);
738 	if (err) {
739 freeout:	t4_free_sge_resources(adap);
740 		return err;
741 	}
742 
743 	for_each_port(adap, i) {
744 		struct net_device *dev = adap->port[i];
745 		struct port_info *pi = netdev_priv(dev);
746 		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
747 		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
748 
749 		for (j = 0; j < pi->nqsets; j++, q++) {
750 			if (msi_idx > 0)
751 				msi_idx++;
752 			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
753 					       msi_idx, &q->fl,
754 					       t4_ethrx_handler);
755 			if (err)
756 				goto freeout;
757 			q->rspq.idx = j;
758 			memset(&q->stats, 0, sizeof(q->stats));
759 		}
760 		for (j = 0; j < pi->nqsets; j++, t++) {
761 			err = t4_sge_alloc_eth_txq(adap, t, dev,
762 					netdev_get_tx_queue(dev, j),
763 					s->fw_evtq.cntxt_id);
764 			if (err)
765 				goto freeout;
766 		}
767 	}
768 
769 	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
770 	for_each_ofldrxq(s, i) {
771 		struct sge_ofld_rxq *q = &s->ofldrxq[i];
772 		struct net_device *dev = adap->port[i / j];
773 
774 		if (msi_idx > 0)
775 			msi_idx++;
776 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
777 				       &q->fl, uldrx_handler);
778 		if (err)
779 			goto freeout;
780 		memset(&q->stats, 0, sizeof(q->stats));
781 		s->ofld_rxq[i] = q->rspq.abs_id;
782 		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
783 					    s->fw_evtq.cntxt_id);
784 		if (err)
785 			goto freeout;
786 	}
787 
788 	for_each_rdmarxq(s, i) {
789 		struct sge_ofld_rxq *q = &s->rdmarxq[i];
790 
791 		if (msi_idx > 0)
792 			msi_idx++;
793 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
794 				       msi_idx, &q->fl, uldrx_handler);
795 		if (err)
796 			goto freeout;
797 		memset(&q->stats, 0, sizeof(q->stats));
798 		s->rdma_rxq[i] = q->rspq.abs_id;
799 	}
800 
801 	for_each_port(adap, i) {
802 		/*
803 		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
804 		 * have RDMA queues, and that's the right value.
805 		 */
806 		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
807 					    s->fw_evtq.cntxt_id,
808 					    s->rdmarxq[i].rspq.cntxt_id);
809 		if (err)
810 			goto freeout;
811 	}
812 
813 	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
814 		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
815 		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
816 	return 0;
817 }
818 
819 /*
820  * Returns 0 if new FW was successfully loaded, a positive errno if a load was
821  * started but failed, and a negative errno if flash load couldn't start.
822  */
823 static int upgrade_fw(struct adapter *adap)
824 {
825 	int ret;
826 	u32 vers;
827 	const struct fw_hdr *hdr;
828 	const struct firmware *fw;
829 	struct device *dev = adap->pdev_dev;
830 
831 	ret = request_firmware(&fw, FW_FNAME, dev);
832 	if (ret < 0) {
833 		dev_err(dev, "unable to load firmware image " FW_FNAME
834 			", error %d\n", ret);
835 		return ret;
836 	}
837 
838 	hdr = (const struct fw_hdr *)fw->data;
839 	vers = ntohl(hdr->fw_ver);
840 	if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
841 		ret = -EINVAL;              /* wrong major version, won't do */
842 		goto out;
843 	}
844 
845 	/*
846 	 * If the flash FW is unusable or we found something newer, load it.
847 	 */
848 	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
849 	    vers > adap->params.fw_vers) {
850 		ret = -t4_load_fw(adap, fw->data, fw->size);
851 		if (!ret)
852 			dev_info(dev, "firmware upgraded to version %pI4 from "
853 				 FW_FNAME "\n", &hdr->fw_ver);
854 	}
855 out:	release_firmware(fw);
856 	return ret;
857 }
858 
859 /*
860  * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
861  * The allocated memory is cleared.
862  */
863 void *t4_alloc_mem(size_t size)
864 {
865 	void *p = kzalloc(size, GFP_KERNEL);
866 
867 	if (!p)
868 		p = vzalloc(size);
869 	return p;
870 }
871 
872 /*
873  * Free memory allocated through t4_alloc_mem().
874  */
875 static void t4_free_mem(void *addr)
876 {
877 	if (is_vmalloc_addr(addr))
878 		vfree(addr);
879 	else
880 		kfree(addr);
881 }
882 
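/* Whether the adapter is running with protocol offload (ULD) support. */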
883 static inline int is_offload(const struct adapter *adap)
884 {
885 	return adap->params.offload;
886 }
887 
888 /*
889  * Implementation of ethtool operations.
890  */
891 
892 static u32 get_msglevel(struct net_device *dev)
893 {
894 	return netdev2adap(dev)->msg_enable;
895 }
896 
897 static void set_msglevel(struct net_device *dev, u32 val)
898 {
899 	netdev2adap(dev)->msg_enable = val;
900 }
901 
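/*
 * Names reported for ethtool statistics.  The order must match the layout of
 * struct port_stats followed by struct queue_port_stats below.
 */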
902 static char stats_strings[][ETH_GSTRING_LEN] = {
903 	"TxOctetsOK         ",
904 	"TxFramesOK         ",
905 	"TxBroadcastFrames  ",
906 	"TxMulticastFrames  ",
907 	"TxUnicastFrames    ",
908 	"TxErrorFrames      ",
909 
910 	"TxFrames64         ",
911 	"TxFrames65To127    ",
912 	"TxFrames128To255   ",
913 	"TxFrames256To511   ",
914 	"TxFrames512To1023  ",
915 	"TxFrames1024To1518 ",
916 	"TxFrames1519ToMax  ",
917 
918 	"TxFramesDropped    ",
919 	"TxPauseFrames      ",
920 	"TxPPP0Frames       ",
921 	"TxPPP1Frames       ",
922 	"TxPPP2Frames       ",
923 	"TxPPP3Frames       ",
924 	"TxPPP4Frames       ",
925 	"TxPPP5Frames       ",
926 	"TxPPP6Frames       ",
927 	"TxPPP7Frames       ",
928 
929 	"RxOctetsOK         ",
930 	"RxFramesOK         ",
931 	"RxBroadcastFrames  ",
932 	"RxMulticastFrames  ",
933 	"RxUnicastFrames    ",
934 
935 	"RxFramesTooLong    ",
936 	"RxJabberErrors     ",
937 	"RxFCSErrors        ",
938 	"RxLengthErrors     ",
939 	"RxSymbolErrors     ",
940 	"RxRuntFrames       ",
941 
942 	"RxFrames64         ",
943 	"RxFrames65To127    ",
944 	"RxFrames128To255   ",
945 	"RxFrames256To511   ",
946 	"RxFrames512To1023  ",
947 	"RxFrames1024To1518 ",
948 	"RxFrames1519ToMax  ",
949 
950 	"RxPauseFrames      ",
951 	"RxPPP0Frames       ",
952 	"RxPPP1Frames       ",
953 	"RxPPP2Frames       ",
954 	"RxPPP3Frames       ",
955 	"RxPPP4Frames       ",
956 	"RxPPP5Frames       ",
957 	"RxPPP6Frames       ",
958 	"RxPPP7Frames       ",
959 
960 	"RxBG0FramesDropped ",
961 	"RxBG1FramesDropped ",
962 	"RxBG2FramesDropped ",
963 	"RxBG3FramesDropped ",
964 	"RxBG0FramesTrunc   ",
965 	"RxBG1FramesTrunc   ",
966 	"RxBG2FramesTrunc   ",
967 	"RxBG3FramesTrunc   ",
968 
969 	"TSO                ",
970 	"TxCsumOffload      ",
971 	"RxCsumGood         ",
972 	"VLANextractions    ",
973 	"VLANinsertions     ",
974 	"GROpackets         ",
975 	"GROmerged          ",
976 };
977 
978 static int get_sset_count(struct net_device *dev, int sset)
979 {
980 	switch (sset) {
981 	case ETH_SS_STATS:
982 		return ARRAY_SIZE(stats_strings);
983 	default:
984 		return -EOPNOTSUPP;
985 	}
986 }
987 
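/* Size of the buffer returned by the ethtool register-dump operation. */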
988 #define T4_REGMAP_SIZE (160 * 1024)
989 
990 static int get_regs_len(struct net_device *dev)
991 {
992 	return T4_REGMAP_SIZE;
993 }
994 
995 static int get_eeprom_len(struct net_device *dev)
996 {
997 	return EEPROMSIZE;
998 }
999 
1000 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1001 {
1002 	struct adapter *adapter = netdev2adap(dev);
1003 
1004 	strcpy(info->driver, KBUILD_MODNAME);
1005 	strcpy(info->version, DRV_VERSION);
1006 	strcpy(info->bus_info, pci_name(adapter->pdev));
1007 
1008 	if (!adapter->params.fw_vers)
1009 		strcpy(info->fw_version, "N/A");
1010 	else
1011 		snprintf(info->fw_version, sizeof(info->fw_version),
1012 			"%u.%u.%u.%u, TP %u.%u.%u.%u",
1013 			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1014 			FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1015 			FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1016 			FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1017 			FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1018 			FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1019 			FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1020 			FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1021 }
1022 
1023 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1024 {
1025 	if (stringset == ETH_SS_STATS)
1026 		memcpy(data, stats_strings, sizeof(stats_strings));
1027 }
1028 
1029 /*
1030  * port stats maintained per queue of the port.  They should be in the same
1031  * order as in stats_strings above.
1032  */
1033 struct queue_port_stats {
1034 	u64 tso;
1035 	u64 tx_csum;
1036 	u64 rx_csum;
1037 	u64 vlan_ex;
1038 	u64 vlan_ins;
1039 	u64 gro_pkts;
1040 	u64 gro_merged;
1041 };
1042 
1043 static void collect_sge_port_stats(const struct adapter *adap,
1044 		const struct port_info *p, struct queue_port_stats *s)
1045 {
1046 	int i;
1047 	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1048 	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1049 
1050 	memset(s, 0, sizeof(*s));
1051 	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1052 		s->tso += tx->tso;
1053 		s->tx_csum += tx->tx_cso;
1054 		s->rx_csum += rx->stats.rx_cso;
1055 		s->vlan_ex += rx->stats.vlan_ex;
1056 		s->vlan_ins += tx->vlan_ins;
1057 		s->gro_pkts += rx->stats.lro_pkts;
1058 		s->gro_merged += rx->stats.lro_merged;
1059 	}
1060 }
1061 
1062 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1063 		      u64 *data)
1064 {
1065 	struct port_info *pi = netdev_priv(dev);
1066 	struct adapter *adapter = pi->adapter;
1067 
1068 	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1069 
1070 	data += sizeof(struct port_stats) / sizeof(u64);
1071 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1072 }
1073 
1074 /*
1075  * Return a version number to identify the type of adapter.  The scheme is:
1076  * - bits 0..9: chip version
1077  * - bits 10..15: chip revision
1078  * - bits 16..23: register dump version
1079  */
1080 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1081 {
1082 	return 4 | (ap->params.rev << 10) | (1 << 16);
1083 }
1084 
1085 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1086 			   unsigned int end)
1087 {
1088 	u32 *p = buf + start;
1089 
1090 	for ( ; start <= end; start += sizeof(u32))
1091 		*p++ = t4_read_reg(ap, start);
1092 }
1093 
1094 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1095 		     void *buf)
1096 {
1097 	static const unsigned int reg_ranges[] = {
1098 		0x1008, 0x1108,
1099 		0x1180, 0x11b4,
1100 		0x11fc, 0x123c,
1101 		0x1300, 0x173c,
1102 		0x1800, 0x18fc,
1103 		0x3000, 0x30d8,
1104 		0x30e0, 0x5924,
1105 		0x5960, 0x59d4,
1106 		0x5a00, 0x5af8,
1107 		0x6000, 0x6098,
1108 		0x6100, 0x6150,
1109 		0x6200, 0x6208,
1110 		0x6240, 0x6248,
1111 		0x6280, 0x6338,
1112 		0x6370, 0x638c,
1113 		0x6400, 0x643c,
1114 		0x6500, 0x6524,
1115 		0x6a00, 0x6a38,
1116 		0x6a60, 0x6a78,
1117 		0x6b00, 0x6b84,
1118 		0x6bf0, 0x6c84,
1119 		0x6cf0, 0x6d84,
1120 		0x6df0, 0x6e84,
1121 		0x6ef0, 0x6f84,
1122 		0x6ff0, 0x7084,
1123 		0x70f0, 0x7184,
1124 		0x71f0, 0x7284,
1125 		0x72f0, 0x7384,
1126 		0x73f0, 0x7450,
1127 		0x7500, 0x7530,
1128 		0x7600, 0x761c,
1129 		0x7680, 0x76cc,
1130 		0x7700, 0x7798,
1131 		0x77c0, 0x77fc,
1132 		0x7900, 0x79fc,
1133 		0x7b00, 0x7c38,
1134 		0x7d00, 0x7efc,
1135 		0x8dc0, 0x8e1c,
1136 		0x8e30, 0x8e78,
1137 		0x8ea0, 0x8f6c,
1138 		0x8fc0, 0x9074,
1139 		0x90fc, 0x90fc,
1140 		0x9400, 0x9458,
1141 		0x9600, 0x96bc,
1142 		0x9800, 0x9808,
1143 		0x9820, 0x983c,
1144 		0x9850, 0x9864,
1145 		0x9c00, 0x9c6c,
1146 		0x9c80, 0x9cec,
1147 		0x9d00, 0x9d6c,
1148 		0x9d80, 0x9dec,
1149 		0x9e00, 0x9e6c,
1150 		0x9e80, 0x9eec,
1151 		0x9f00, 0x9f6c,
1152 		0x9f80, 0x9fec,
1153 		0xd004, 0xd03c,
1154 		0xdfc0, 0xdfe0,
1155 		0xe000, 0xea7c,
1156 		0xf000, 0x11190,
1157 		0x19040, 0x1906c,
1158 		0x19078, 0x19080,
1159 		0x1908c, 0x19124,
1160 		0x19150, 0x191b0,
1161 		0x191d0, 0x191e8,
1162 		0x19238, 0x1924c,
1163 		0x193f8, 0x19474,
1164 		0x19490, 0x194f8,
1165 		0x19800, 0x19f30,
1166 		0x1a000, 0x1a06c,
1167 		0x1a0b0, 0x1a120,
1168 		0x1a128, 0x1a138,
1169 		0x1a190, 0x1a1c4,
1170 		0x1a1fc, 0x1a1fc,
1171 		0x1e040, 0x1e04c,
1172 		0x1e284, 0x1e28c,
1173 		0x1e2c0, 0x1e2c0,
1174 		0x1e2e0, 0x1e2e0,
1175 		0x1e300, 0x1e384,
1176 		0x1e3c0, 0x1e3c8,
1177 		0x1e440, 0x1e44c,
1178 		0x1e684, 0x1e68c,
1179 		0x1e6c0, 0x1e6c0,
1180 		0x1e6e0, 0x1e6e0,
1181 		0x1e700, 0x1e784,
1182 		0x1e7c0, 0x1e7c8,
1183 		0x1e840, 0x1e84c,
1184 		0x1ea84, 0x1ea8c,
1185 		0x1eac0, 0x1eac0,
1186 		0x1eae0, 0x1eae0,
1187 		0x1eb00, 0x1eb84,
1188 		0x1ebc0, 0x1ebc8,
1189 		0x1ec40, 0x1ec4c,
1190 		0x1ee84, 0x1ee8c,
1191 		0x1eec0, 0x1eec0,
1192 		0x1eee0, 0x1eee0,
1193 		0x1ef00, 0x1ef84,
1194 		0x1efc0, 0x1efc8,
1195 		0x1f040, 0x1f04c,
1196 		0x1f284, 0x1f28c,
1197 		0x1f2c0, 0x1f2c0,
1198 		0x1f2e0, 0x1f2e0,
1199 		0x1f300, 0x1f384,
1200 		0x1f3c0, 0x1f3c8,
1201 		0x1f440, 0x1f44c,
1202 		0x1f684, 0x1f68c,
1203 		0x1f6c0, 0x1f6c0,
1204 		0x1f6e0, 0x1f6e0,
1205 		0x1f700, 0x1f784,
1206 		0x1f7c0, 0x1f7c8,
1207 		0x1f840, 0x1f84c,
1208 		0x1fa84, 0x1fa8c,
1209 		0x1fac0, 0x1fac0,
1210 		0x1fae0, 0x1fae0,
1211 		0x1fb00, 0x1fb84,
1212 		0x1fbc0, 0x1fbc8,
1213 		0x1fc40, 0x1fc4c,
1214 		0x1fe84, 0x1fe8c,
1215 		0x1fec0, 0x1fec0,
1216 		0x1fee0, 0x1fee0,
1217 		0x1ff00, 0x1ff84,
1218 		0x1ffc0, 0x1ffc8,
1219 		0x20000, 0x2002c,
1220 		0x20100, 0x2013c,
1221 		0x20190, 0x201c8,
1222 		0x20200, 0x20318,
1223 		0x20400, 0x20528,
1224 		0x20540, 0x20614,
1225 		0x21000, 0x21040,
1226 		0x2104c, 0x21060,
1227 		0x210c0, 0x210ec,
1228 		0x21200, 0x21268,
1229 		0x21270, 0x21284,
1230 		0x212fc, 0x21388,
1231 		0x21400, 0x21404,
1232 		0x21500, 0x21518,
1233 		0x2152c, 0x2153c,
1234 		0x21550, 0x21554,
1235 		0x21600, 0x21600,
1236 		0x21608, 0x21628,
1237 		0x21630, 0x2163c,
1238 		0x21700, 0x2171c,
1239 		0x21780, 0x2178c,
1240 		0x21800, 0x21c38,
1241 		0x21c80, 0x21d7c,
1242 		0x21e00, 0x21e04,
1243 		0x22000, 0x2202c,
1244 		0x22100, 0x2213c,
1245 		0x22190, 0x221c8,
1246 		0x22200, 0x22318,
1247 		0x22400, 0x22528,
1248 		0x22540, 0x22614,
1249 		0x23000, 0x23040,
1250 		0x2304c, 0x23060,
1251 		0x230c0, 0x230ec,
1252 		0x23200, 0x23268,
1253 		0x23270, 0x23284,
1254 		0x232fc, 0x23388,
1255 		0x23400, 0x23404,
1256 		0x23500, 0x23518,
1257 		0x2352c, 0x2353c,
1258 		0x23550, 0x23554,
1259 		0x23600, 0x23600,
1260 		0x23608, 0x23628,
1261 		0x23630, 0x2363c,
1262 		0x23700, 0x2371c,
1263 		0x23780, 0x2378c,
1264 		0x23800, 0x23c38,
1265 		0x23c80, 0x23d7c,
1266 		0x23e00, 0x23e04,
1267 		0x24000, 0x2402c,
1268 		0x24100, 0x2413c,
1269 		0x24190, 0x241c8,
1270 		0x24200, 0x24318,
1271 		0x24400, 0x24528,
1272 		0x24540, 0x24614,
1273 		0x25000, 0x25040,
1274 		0x2504c, 0x25060,
1275 		0x250c0, 0x250ec,
1276 		0x25200, 0x25268,
1277 		0x25270, 0x25284,
1278 		0x252fc, 0x25388,
1279 		0x25400, 0x25404,
1280 		0x25500, 0x25518,
1281 		0x2552c, 0x2553c,
1282 		0x25550, 0x25554,
1283 		0x25600, 0x25600,
1284 		0x25608, 0x25628,
1285 		0x25630, 0x2563c,
1286 		0x25700, 0x2571c,
1287 		0x25780, 0x2578c,
1288 		0x25800, 0x25c38,
1289 		0x25c80, 0x25d7c,
1290 		0x25e00, 0x25e04,
1291 		0x26000, 0x2602c,
1292 		0x26100, 0x2613c,
1293 		0x26190, 0x261c8,
1294 		0x26200, 0x26318,
1295 		0x26400, 0x26528,
1296 		0x26540, 0x26614,
1297 		0x27000, 0x27040,
1298 		0x2704c, 0x27060,
1299 		0x270c0, 0x270ec,
1300 		0x27200, 0x27268,
1301 		0x27270, 0x27284,
1302 		0x272fc, 0x27388,
1303 		0x27400, 0x27404,
1304 		0x27500, 0x27518,
1305 		0x2752c, 0x2753c,
1306 		0x27550, 0x27554,
1307 		0x27600, 0x27600,
1308 		0x27608, 0x27628,
1309 		0x27630, 0x2763c,
1310 		0x27700, 0x2771c,
1311 		0x27780, 0x2778c,
1312 		0x27800, 0x27c38,
1313 		0x27c80, 0x27d7c,
1314 		0x27e00, 0x27e04
1315 	};
1316 
1317 	int i;
1318 	struct adapter *ap = netdev2adap(dev);
1319 
1320 	regs->version = mk_adap_vers(ap);
1321 
1322 	memset(buf, 0, T4_REGMAP_SIZE);
1323 	for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1324 		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1325 }
1326 
1327 static int restart_autoneg(struct net_device *dev)
1328 {
1329 	struct port_info *p = netdev_priv(dev);
1330 
1331 	if (!netif_running(dev))
1332 		return -EAGAIN;
1333 	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1334 		return -EINVAL;
1335 	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
1336 	return 0;
1337 }
1338 
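/* Identify a port by blinking its LED for the requested number of seconds. */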
1339 static int identify_port(struct net_device *dev, u32 data)
1340 {
1341 	struct adapter *adap = netdev2adap(dev);
1342 
1343 	if (data == 0)
1344 		data = 2;     /* default to 2 seconds */
1345 
1346 	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid,
1347 				data * 5);
1348 }
1349 
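/*
 * Translate a firmware port type and capability word into ethtool
 * SUPPORTED_* flags.
 */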
1350 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1351 {
1352 	unsigned int v = 0;
1353 
1354 	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1355 	    type == FW_PORT_TYPE_BT_XAUI) {
1356 		v |= SUPPORTED_TP;
1357 		if (caps & FW_PORT_CAP_SPEED_100M)
1358 			v |= SUPPORTED_100baseT_Full;
1359 		if (caps & FW_PORT_CAP_SPEED_1G)
1360 			v |= SUPPORTED_1000baseT_Full;
1361 		if (caps & FW_PORT_CAP_SPEED_10G)
1362 			v |= SUPPORTED_10000baseT_Full;
1363 	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1364 		v |= SUPPORTED_Backplane;
1365 		if (caps & FW_PORT_CAP_SPEED_1G)
1366 			v |= SUPPORTED_1000baseKX_Full;
1367 		if (caps & FW_PORT_CAP_SPEED_10G)
1368 			v |= SUPPORTED_10000baseKX4_Full;
1369 	} else if (type == FW_PORT_TYPE_KR)
1370 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1371 	else if (type == FW_PORT_TYPE_BP_AP)
1372 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1373 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
1374 	else if (type == FW_PORT_TYPE_BP4_AP)
1375 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1376 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
1377 		     SUPPORTED_10000baseKX4_Full;
1378 	else if (type == FW_PORT_TYPE_FIBER_XFI ||
1379 		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
1380 		v |= SUPPORTED_FIBRE;
1381 
1382 	if (caps & FW_PORT_CAP_ANEG)
1383 		v |= SUPPORTED_Autoneg;
1384 	return v;
1385 }
1386 
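/*
 * Translate ethtool ADVERTISED_* speed flags into firmware port capability
 * bits.
 */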
1387 static unsigned int to_fw_linkcaps(unsigned int caps)
1388 {
1389 	unsigned int v = 0;
1390 
1391 	if (caps & ADVERTISED_100baseT_Full)
1392 		v |= FW_PORT_CAP_SPEED_100M;
1393 	if (caps & ADVERTISED_1000baseT_Full)
1394 		v |= FW_PORT_CAP_SPEED_1G;
1395 	if (caps & ADVERTISED_10000baseT_Full)
1396 		v |= FW_PORT_CAP_SPEED_10G;
1397 	return v;
1398 }
1399 
1400 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1401 {
1402 	const struct port_info *p = netdev_priv(dev);
1403 
1404 	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1405 	    p->port_type == FW_PORT_TYPE_BT_XFI ||
1406 	    p->port_type == FW_PORT_TYPE_BT_XAUI)
1407 		cmd->port = PORT_TP;
1408 	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1409 		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
1410 		cmd->port = PORT_FIBRE;
1411 	else if (p->port_type == FW_PORT_TYPE_SFP) {
1412 		if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1413 		    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1414 			cmd->port = PORT_DA;
1415 		else
1416 			cmd->port = PORT_FIBRE;
1417 	} else
1418 		cmd->port = PORT_OTHER;
1419 
1420 	if (p->mdio_addr >= 0) {
1421 		cmd->phy_address = p->mdio_addr;
1422 		cmd->transceiver = XCVR_EXTERNAL;
1423 		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1424 			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1425 	} else {
1426 		cmd->phy_address = 0;  /* not really, but no better option */
1427 		cmd->transceiver = XCVR_INTERNAL;
1428 		cmd->mdio_support = 0;
1429 	}
1430 
1431 	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1432 	cmd->advertising = from_fw_linkcaps(p->port_type,
1433 					    p->link_cfg.advertising);
1434 	cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
1435 	cmd->duplex = DUPLEX_FULL;
1436 	cmd->autoneg = p->link_cfg.autoneg;
1437 	cmd->maxtxpkt = 0;
1438 	cmd->maxrxpkt = 0;
1439 	return 0;
1440 }
1441 
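/* Map an ethtool speed value to the corresponding firmware speed capability. */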
1442 static unsigned int speed_to_caps(int speed)
1443 {
1444 	if (speed == SPEED_100)
1445 		return FW_PORT_CAP_SPEED_100M;
1446 	if (speed == SPEED_1000)
1447 		return FW_PORT_CAP_SPEED_1G;
1448 	if (speed == SPEED_10000)
1449 		return FW_PORT_CAP_SPEED_10G;
1450 	return 0;
1451 }
1452 
1453 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1454 {
1455 	unsigned int cap;
1456 	struct port_info *p = netdev_priv(dev);
1457 	struct link_config *lc = &p->link_cfg;
1458 
1459 	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
1460 		return -EINVAL;
1461 
1462 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1463 		/*
1464 		 * PHY offers a single speed.  See if that's what's
1465 		 * being requested.
1466 		 */
1467 		if (cmd->autoneg == AUTONEG_DISABLE &&
1468 		    (lc->supported & speed_to_caps(cmd->speed)))
1469 				return 0;
1470 		return -EINVAL;
1471 	}
1472 
1473 	if (cmd->autoneg == AUTONEG_DISABLE) {
1474 		cap = speed_to_caps(cmd->speed);
1475 
1476 		if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
1477 		    cmd->speed == SPEED_10000)
1478 			return -EINVAL;
1479 		lc->requested_speed = cap;
1480 		lc->advertising = 0;
1481 	} else {
1482 		cap = to_fw_linkcaps(cmd->advertising);
1483 		if (!(lc->supported & cap))
1484 			return -EINVAL;
1485 		lc->requested_speed = 0;
1486 		lc->advertising = cap | FW_PORT_CAP_ANEG;
1487 	}
1488 	lc->autoneg = cmd->autoneg;
1489 
1490 	if (netif_running(dev))
1491 		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1492 				     lc);
1493 	return 0;
1494 }
1495 
1496 static void get_pauseparam(struct net_device *dev,
1497 			   struct ethtool_pauseparam *epause)
1498 {
1499 	struct port_info *p = netdev_priv(dev);
1500 
1501 	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1502 	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1503 	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1504 }
1505 
1506 static int set_pauseparam(struct net_device *dev,
1507 			  struct ethtool_pauseparam *epause)
1508 {
1509 	struct port_info *p = netdev_priv(dev);
1510 	struct link_config *lc = &p->link_cfg;
1511 
1512 	if (epause->autoneg == AUTONEG_DISABLE)
1513 		lc->requested_fc = 0;
1514 	else if (lc->supported & FW_PORT_CAP_ANEG)
1515 		lc->requested_fc = PAUSE_AUTONEG;
1516 	else
1517 		return -EINVAL;
1518 
1519 	if (epause->rx_pause)
1520 		lc->requested_fc |= PAUSE_RX;
1521 	if (epause->tx_pause)
1522 		lc->requested_fc |= PAUSE_TX;
1523 	if (netif_running(dev))
1524 		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1525 				     lc);
1526 	return 0;
1527 }
1528 
1529 static u32 get_rx_csum(struct net_device *dev)
1530 {
1531 	struct port_info *p = netdev_priv(dev);
1532 
1533 	return p->rx_offload & RX_CSO;
1534 }
1535 
1536 static int set_rx_csum(struct net_device *dev, u32 data)
1537 {
1538 	struct port_info *p = netdev_priv(dev);
1539 
1540 	if (data)
1541 		p->rx_offload |= RX_CSO;
1542 	else
1543 		p->rx_offload &= ~RX_CSO;
1544 	return 0;
1545 }
1546 
1547 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1548 {
1549 	const struct port_info *pi = netdev_priv(dev);
1550 	const struct sge *s = &pi->adapter->sge;
1551 
1552 	e->rx_max_pending = MAX_RX_BUFFERS;
1553 	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1554 	e->rx_jumbo_max_pending = 0;
1555 	e->tx_max_pending = MAX_TXQ_ENTRIES;
1556 
1557 	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1558 	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1559 	e->rx_jumbo_pending = 0;
1560 	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1561 }
1562 
1563 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1564 {
1565 	int i;
1566 	const struct port_info *pi = netdev_priv(dev);
1567 	struct adapter *adapter = pi->adapter;
1568 	struct sge *s = &adapter->sge;
1569 
1570 	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1571 	    e->tx_pending > MAX_TXQ_ENTRIES ||
1572 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1573 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1574 	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1575 		return -EINVAL;
1576 
1577 	if (adapter->flags & FULL_INIT_DONE)
1578 		return -EBUSY;
1579 
1580 	for (i = 0; i < pi->nqsets; ++i) {
1581 		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1582 		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1583 		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1584 	}
1585 	return 0;
1586 }
1587 
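/*
 * Return the index of the SGE interrupt hold-off timer value closest to the
 * requested time (in us).
 */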
1588 static int closest_timer(const struct sge *s, int time)
1589 {
1590 	int i, delta, match = 0, min_delta = INT_MAX;
1591 
1592 	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1593 		delta = time - s->timer_val[i];
1594 		if (delta < 0)
1595 			delta = -delta;
1596 		if (delta < min_delta) {
1597 			min_delta = delta;
1598 			match = i;
1599 		}
1600 	}
1601 	return match;
1602 }
1603 
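/*
 * Return the index of the SGE interrupt packet-count threshold closest to the
 * requested value.
 */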
1604 static int closest_thres(const struct sge *s, int thres)
1605 {
1606 	int i, delta, match = 0, min_delta = INT_MAX;
1607 
1608 	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1609 		delta = thres - s->counter_val[i];
1610 		if (delta < 0)
1611 			delta = -delta;
1612 		if (delta < min_delta) {
1613 			min_delta = delta;
1614 			match = i;
1615 		}
1616 	}
1617 	return match;
1618 }
1619 
1620 /*
1621  * Return a queue's interrupt hold-off time in us.  0 means no timer.
1622  */
1623 static unsigned int qtimer_val(const struct adapter *adap,
1624 			       const struct sge_rspq *q)
1625 {
1626 	unsigned int idx = q->intr_params >> 1;
1627 
1628 	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1629 }
1630 
1631 /**
1632  *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
1633  *	@adap: the adapter
1634  *	@q: the Rx queue
1635  *	@us: the hold-off time in us, or 0 to disable timer
1636  *	@cnt: the hold-off packet count, or 0 to disable counter
1637  *
1638  *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
1639  *	one of the two needs to be enabled for the queue to generate interrupts.
1640  */
1641 static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1642 			       unsigned int us, unsigned int cnt)
1643 {
1644 	if ((us | cnt) == 0)
1645 		cnt = 1;
1646 
1647 	if (cnt) {
1648 		int err;
1649 		u32 v, new_idx;
1650 
1651 		new_idx = closest_thres(&adap->sge, cnt);
1652 		if (q->desc && q->pktcnt_idx != new_idx) {
1653 			/* the queue has already been created, update it */
1654 			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1655 			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1656 			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
1657 			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1658 					    &new_idx);
1659 			if (err)
1660 				return err;
1661 		}
1662 		q->pktcnt_idx = new_idx;
1663 	}
1664 
1665 	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1666 	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1667 	return 0;
1668 }
1669 
1670 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1671 {
1672 	const struct port_info *pi = netdev_priv(dev);
1673 	struct adapter *adap = pi->adapter;
1674 
1675 	return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1676 			c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
1677 }
1678 
1679 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1680 {
1681 	const struct port_info *pi = netdev_priv(dev);
1682 	const struct adapter *adap = pi->adapter;
1683 	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1684 
1685 	c->rx_coalesce_usecs = qtimer_val(adap, rq);
1686 	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
1687 		adap->sge.counter_val[rq->pktcnt_idx] : 0;
1688 	return 0;
1689 }
1690 
1691 /**
1692  *	eeprom_ptov - translate a physical EEPROM address to virtual
1693  *	@phys_addr: the physical EEPROM address
1694  *	@fn: the PCI function number
1695  *	@sz: size of function-specific area
1696  *
1697  *	Translate a physical EEPROM address to virtual.  The first 1K is
1698  *	accessed through virtual addresses starting at 31K, the rest is
1699  *	accessed through virtual addresses starting at 0.
1700  *
1701  *	The mapping is as follows:
1702  *	[0..1K) -> [31K..32K)
1703  *	[1K..1K+A) -> [31K-A..31K)
1704  *	[1K+A..ES) -> [0..ES-A-1K)
1705  *
1706  *	where A = @fn * @sz, and ES = EEPROM size.
1707  */
1708 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1709 {
1710 	fn *= sz;
1711 	if (phys_addr < 1024)
1712 		return phys_addr + (31 << 10);
1713 	if (phys_addr < 1024 + fn)
1714 		return 31744 - fn + phys_addr - 1024;
1715 	if (phys_addr < EEPROMSIZE)
1716 		return phys_addr - 1024 - fn;
1717 	return -EINVAL;
1718 }
1719 
1720 /*
1721  * The next two routines implement eeprom read/write from physical addresses.
1722  */
1723 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1724 {
1725 	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1726 
1727 	if (vaddr >= 0)
1728 		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1729 	return vaddr < 0 ? vaddr : 0;
1730 }
1731 
1732 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1733 {
1734 	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1735 
1736 	if (vaddr >= 0)
1737 		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1738 	return vaddr < 0 ? vaddr : 0;
1739 }
1740 
1741 #define EEPROM_MAGIC 0x38E2F10C
1742 
1743 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1744 		      u8 *data)
1745 {
1746 	int i, err = 0;
1747 	struct adapter *adapter = netdev2adap(dev);
1748 
1749 	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1750 	if (!buf)
1751 		return -ENOMEM;
1752 
1753 	e->magic = EEPROM_MAGIC;
1754 	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1755 		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1756 
1757 	if (!err)
1758 		memcpy(data, buf + e->offset, e->len);
1759 	kfree(buf);
1760 	return err;
1761 }
1762 
1763 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1764 		      u8 *data)
1765 {
1766 	u8 *buf;
1767 	int err = 0;
1768 	u32 aligned_offset, aligned_len, *p;
1769 	struct adapter *adapter = netdev2adap(dev);
1770 
1771 	if (eeprom->magic != EEPROM_MAGIC)
1772 		return -EINVAL;
1773 
1774 	aligned_offset = eeprom->offset & ~3;
1775 	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1776 
1777 	if (adapter->fn > 0) {
1778 		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
1779 
1780 		if (aligned_offset < start ||
1781 		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
1782 			return -EPERM;
1783 	}
1784 
1785 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1786 		/*
1787 		 * RMW possibly needed for first or last words.
1788 		 */
1789 		buf = kmalloc(aligned_len, GFP_KERNEL);
1790 		if (!buf)
1791 			return -ENOMEM;
1792 		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1793 		if (!err && aligned_len > 4)
1794 			err = eeprom_rd_phys(adapter,
1795 					     aligned_offset + aligned_len - 4,
1796 					     (u32 *)&buf[aligned_len - 4]);
1797 		if (err)
1798 			goto out;
1799 		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1800 	} else
1801 		buf = data;
1802 
1803 	err = t4_seeprom_wp(adapter, false);
1804 	if (err)
1805 		goto out;
1806 
1807 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1808 		err = eeprom_wr_phys(adapter, aligned_offset, *p);
1809 		aligned_offset += 4;
1810 	}
1811 
1812 	if (!err)
1813 		err = t4_seeprom_wp(adapter, true);
1814 out:
1815 	if (buf != data)
1816 		kfree(buf);
1817 	return err;
1818 }
1819 
1820 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1821 {
1822 	int ret;
1823 	const struct firmware *fw;
1824 	struct adapter *adap = netdev2adap(netdev);
1825 
1826 	ef->data[sizeof(ef->data) - 1] = '\0';
1827 	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1828 	if (ret < 0)
1829 		return ret;
1830 
1831 	ret = t4_load_fw(adap, fw->data, fw->size);
1832 	release_firmware(fw);
1833 	if (!ret)
1834 		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
1835 	return ret;
1836 }
1837 
1838 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1839 #define BCAST_CRC 0xa0ccc1a6
1840 
1841 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1842 {
1843 	wol->supported = WAKE_BCAST | WAKE_MAGIC;
1844 	wol->wolopts = netdev2adap(dev)->wol;
1845 	memset(&wol->sopass, 0, sizeof(wol->sopass));
1846 }
1847 
1848 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1849 {
1850 	int err = 0;
1851 	struct port_info *pi = netdev_priv(dev);
1852 
1853 	if (wol->wolopts & ~WOL_SUPPORTED)
1854 		return -EINVAL;
1855 	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
1856 			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
1857 	if (wol->wolopts & WAKE_BCAST) {
1858 		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
1859 					~0ULL, 0, false);
1860 		if (!err)
1861 			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
1862 						~6ULL, ~0ULL, BCAST_CRC, true);
1863 	} else
1864 		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
1865 	return err;
1866 }
1867 
1868 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1869 
1870 static int set_tso(struct net_device *dev, u32 value)
1871 {
1872 	if (value)
1873 		dev->features |= TSO_FLAGS;
1874 	else
1875 		dev->features &= ~TSO_FLAGS;
1876 	return 0;
1877 }
1878 
1879 static int set_flags(struct net_device *dev, u32 flags)
1880 {
1881 	int err;
1882 	unsigned long old_feat = dev->features;
1883 
1884 	err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
1885 				   ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
1886 	if (err)
1887 		return err;
1888 
1889 	if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
1890 		const struct port_info *pi = netdev_priv(dev);
1891 
1892 		err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
1893 				    -1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
1894 				    true);
1895 		if (err)
1896 			dev->features = old_feat;
1897 	}
1898 	return err;
1899 }
1900 
1901 static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
1902 {
1903 	const struct port_info *pi = netdev_priv(dev);
1904 	unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
1905 
1906 	p->size = pi->rss_size;
1907 	while (n--)
1908 		p->ring_index[n] = pi->rss[n];
1909 	return 0;
1910 }
1911 
1912 static int set_rss_table(struct net_device *dev,
1913 			 const struct ethtool_rxfh_indir *p)
1914 {
1915 	unsigned int i;
1916 	struct port_info *pi = netdev_priv(dev);
1917 
1918 	if (p->size != pi->rss_size)
1919 		return -EINVAL;
1920 	for (i = 0; i < p->size; i++)
1921 		if (p->ring_index[i] >= pi->nqsets)
1922 			return -EINVAL;
1923 	for (i = 0; i < p->size; i++)
1924 		pi->rss[i] = p->ring_index[i];
1925 	if (pi->adapter->flags & FULL_INIT_DONE)
1926 		return write_rss(pi, pi->rss);
1927 	return 0;
1928 }
1929 
1930 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1931 		     void *rules)
1932 {
1933 	const struct port_info *pi = netdev_priv(dev);
1934 
1935 	switch (info->cmd) {
1936 	case ETHTOOL_GRXFH: {
1937 		unsigned int v = pi->rss_mode;
1938 
1939 		info->data = 0;
1940 		switch (info->flow_type) {
1941 		case TCP_V4_FLOW:
1942 			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
1943 				info->data = RXH_IP_SRC | RXH_IP_DST |
1944 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1945 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1946 				info->data = RXH_IP_SRC | RXH_IP_DST;
1947 			break;
1948 		case UDP_V4_FLOW:
1949 			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
1950 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1951 				info->data = RXH_IP_SRC | RXH_IP_DST |
1952 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1953 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1954 				info->data = RXH_IP_SRC | RXH_IP_DST;
1955 			break;
1956 		case SCTP_V4_FLOW:
1957 		case AH_ESP_V4_FLOW:
1958 		case IPV4_FLOW:
1959 			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1960 				info->data = RXH_IP_SRC | RXH_IP_DST;
1961 			break;
1962 		case TCP_V6_FLOW:
1963 			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
1964 				info->data = RXH_IP_SRC | RXH_IP_DST |
1965 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1966 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1967 				info->data = RXH_IP_SRC | RXH_IP_DST;
1968 			break;
1969 		case UDP_V6_FLOW:
1970 			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
1971 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1972 				info->data = RXH_IP_SRC | RXH_IP_DST |
1973 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1974 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1975 				info->data = RXH_IP_SRC | RXH_IP_DST;
1976 			break;
1977 		case SCTP_V6_FLOW:
1978 		case AH_ESP_V6_FLOW:
1979 		case IPV6_FLOW:
1980 			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1981 				info->data = RXH_IP_SRC | RXH_IP_DST;
1982 			break;
1983 		}
1984 		return 0;
1985 	}
1986 	case ETHTOOL_GRXRINGS:
1987 		info->data = pi->nqsets;
1988 		return 0;
1989 	}
1990 	return -EOPNOTSUPP;
1991 }
1992 
1993 static const struct ethtool_ops cxgb_ethtool_ops = {
1994 	.get_settings      = get_settings,
1995 	.set_settings      = set_settings,
1996 	.get_drvinfo       = get_drvinfo,
1997 	.get_msglevel      = get_msglevel,
1998 	.set_msglevel      = set_msglevel,
1999 	.get_ringparam     = get_sge_param,
2000 	.set_ringparam     = set_sge_param,
2001 	.get_coalesce      = get_coalesce,
2002 	.set_coalesce      = set_coalesce,
2003 	.get_eeprom_len    = get_eeprom_len,
2004 	.get_eeprom        = get_eeprom,
2005 	.set_eeprom        = set_eeprom,
2006 	.get_pauseparam    = get_pauseparam,
2007 	.set_pauseparam    = set_pauseparam,
2008 	.get_rx_csum       = get_rx_csum,
2009 	.set_rx_csum       = set_rx_csum,
2010 	.set_tx_csum       = ethtool_op_set_tx_ipv6_csum,
2011 	.set_sg            = ethtool_op_set_sg,
2012 	.get_link          = ethtool_op_get_link,
2013 	.get_strings       = get_strings,
2014 	.phys_id           = identify_port,
2015 	.nway_reset        = restart_autoneg,
2016 	.get_sset_count    = get_sset_count,
2017 	.get_ethtool_stats = get_stats,
2018 	.get_regs_len      = get_regs_len,
2019 	.get_regs          = get_regs,
2020 	.get_wol           = get_wol,
2021 	.set_wol           = set_wol,
2022 	.set_tso           = set_tso,
2023 	.set_flags         = set_flags,
2024 	.get_rxnfc         = get_rxnfc,
2025 	.get_rxfh_indir    = get_rss_table,
2026 	.set_rxfh_indir    = set_rss_table,
2027 	.flash_device      = set_flash,
2028 };
2029 
2030 /*
2031  * debugfs support
2032  */
2033 
2034 static int mem_open(struct inode *inode, struct file *file)
2035 {
2036 	file->private_data = inode->i_private;
2037 	return 0;
2038 }
2039 
2040 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2041 			loff_t *ppos)
2042 {
2043 	loff_t pos = *ppos;
2044 	loff_t avail = file->f_path.dentry->d_inode->i_size;
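	/*
	 * add_debugfs_mem() stores (adap + memory index) as the inode private
	 * data, so the low 2 bits select the memory type and the remainder
	 * recovers the adapter pointer.
	 */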
2045 	unsigned int mem = (uintptr_t)file->private_data & 3;
2046 	struct adapter *adap = file->private_data - mem;
2047 
2048 	if (pos < 0)
2049 		return -EINVAL;
2050 	if (pos >= avail)
2051 		return 0;
2052 	if (count > avail - pos)
2053 		count = avail - pos;
2054 
2055 	while (count) {
2056 		size_t len;
2057 		int ret, ofst;
2058 		__be32 data[16];
2059 
2060 		if (mem == MEM_MC)
2061 			ret = t4_mc_read(adap, pos, data, NULL);
2062 		else
2063 			ret = t4_edc_read(adap, mem, pos, data, NULL);
2064 		if (ret)
2065 			return ret;
2066 
2067 		ofst = pos % sizeof(data);
2068 		len = min(count, sizeof(data) - ofst);
2069 		if (copy_to_user(buf, (u8 *)data + ofst, len))
2070 			return -EFAULT;
2071 
2072 		buf += len;
2073 		pos += len;
2074 		count -= len;
2075 	}
2076 	count = pos - *ppos;
2077 	*ppos = pos;
2078 	return count;
2079 }
2080 
2081 static const struct file_operations mem_debugfs_fops = {
2082 	.owner   = THIS_MODULE,
2083 	.open    = mem_open,
2084 	.read    = mem_read,
2085 	.llseek  = default_llseek,
2086 };
2087 
2088 static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
2089 				      unsigned int idx, unsigned int size_mb)
2090 {
2091 	struct dentry *de;
2092 
2093 	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2094 				 (void *)adap + idx, &mem_debugfs_fops);
2095 	if (de && de->d_inode)
2096 		de->d_inode->i_size = size_mb << 20;
2097 }
2098 
2099 static int __devinit setup_debugfs(struct adapter *adap)
2100 {
2101 	int i;
2102 
2103 	if (IS_ERR_OR_NULL(adap->debugfs_root))
2104 		return -1;
2105 
2106 	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2107 	if (i & EDRAM0_ENABLE)
2108 		add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2109 	if (i & EDRAM1_ENABLE)
2110 		add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2111 	if (i & EXT_MEM_ENABLE)
2112 		add_debugfs_mem(adap, "mc", MEM_MC,
2113 			EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2114 	if (adap->l2t)
2115 		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2116 				    &t4_l2t_fops);
2117 	return 0;
2118 }
2119 
2120 /*
2121  * upper-layer driver support
2122  */
2123 
2124 /*
2125  * Allocate an active-open TID and set it to the supplied value.
2126  */
2127 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2128 {
2129 	int atid = -1;
2130 
2131 	spin_lock_bh(&t->atid_lock);
2132 	if (t->afree) {
2133 		union aopen_entry *p = t->afree;
2134 
2135 		atid = p - t->atid_tab;
2136 		t->afree = p->next;
2137 		p->data = data;
2138 		t->atids_in_use++;
2139 	}
2140 	spin_unlock_bh(&t->atid_lock);
2141 	return atid;
2142 }
2143 EXPORT_SYMBOL(cxgb4_alloc_atid);
2144 
2145 /*
2146  * Release an active-open TID.
2147  */
2148 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2149 {
2150 	union aopen_entry *p = &t->atid_tab[atid];
2151 
2152 	spin_lock_bh(&t->atid_lock);
2153 	p->next = t->afree;
2154 	t->afree = p;
2155 	t->atids_in_use--;
2156 	spin_unlock_bh(&t->atid_lock);
2157 }
2158 EXPORT_SYMBOL(cxgb4_free_atid);
2159 
2160 /*
2161  * Allocate a server TID and set it to the supplied value.
2162  */
2163 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2164 {
2165 	int stid;
2166 
2167 	spin_lock_bh(&t->stid_lock);
2168 	if (family == PF_INET) {
2169 		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2170 		if (stid < t->nstids)
2171 			__set_bit(stid, t->stid_bmap);
2172 		else
2173 			stid = -1;
2174 	} else {
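		/*
		 * Non-IPv4 servers take a naturally aligned block of four
		 * consecutive stids, i.e. an order-2 region of the bitmap.
		 */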
2175 		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2176 		if (stid < 0)
2177 			stid = -1;
2178 	}
2179 	if (stid >= 0) {
2180 		t->stid_tab[stid].data = data;
2181 		stid += t->stid_base;
2182 		t->stids_in_use++;
2183 	}
2184 	spin_unlock_bh(&t->stid_lock);
2185 	return stid;
2186 }
2187 EXPORT_SYMBOL(cxgb4_alloc_stid);
2188 
2189 /*
2190  * Release a server TID.
2191  */
2192 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2193 {
2194 	stid -= t->stid_base;
2195 	spin_lock_bh(&t->stid_lock);
2196 	if (family == PF_INET)
2197 		__clear_bit(stid, t->stid_bmap);
2198 	else
2199 		bitmap_release_region(t->stid_bmap, stid, 2);
2200 	t->stid_tab[stid].data = NULL;
2201 	t->stids_in_use--;
2202 	spin_unlock_bh(&t->stid_lock);
2203 }
2204 EXPORT_SYMBOL(cxgb4_free_stid);
2205 
2206 /*
2207  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
2208  */
2209 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2210 			   unsigned int tid)
2211 {
2212 	struct cpl_tid_release *req;
2213 
2214 	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2215 	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2216 	INIT_TP_WR(req, tid);
2217 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2218 }
2219 
2220 /*
2221  * Queue a TID release request and if necessary schedule a work queue to
2222  * process it.
2223  */
2224 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2225 				    unsigned int tid)
2226 {
2227 	void **p = &t->tid_tab[tid];
2228 	struct adapter *adap = container_of(t, struct adapter, tids);
2229 
2230 	spin_lock_bh(&adap->tid_release_lock);
2231 	*p = adap->tid_release_head;
2232 	/* Low 2 bits encode the Tx channel number */
2233 	adap->tid_release_head = (void **)((uintptr_t)p | chan);
2234 	if (!adap->tid_release_task_busy) {
2235 		adap->tid_release_task_busy = true;
2236 		schedule_work(&adap->tid_release_task);
2237 	}
2238 	spin_unlock_bh(&adap->tid_release_lock);
2239 }
2240 
2241 /*
2242  * Process the list of pending TID release requests.
2243  */
2244 static void process_tid_release_list(struct work_struct *work)
2245 {
2246 	struct sk_buff *skb;
2247 	struct adapter *adap;
2248 
2249 	adap = container_of(work, struct adapter, tid_release_task);
2250 
2251 	spin_lock_bh(&adap->tid_release_lock);
2252 	while (adap->tid_release_head) {
2253 		void **p = adap->tid_release_head;
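		/* undo the channel encoding applied in cxgb4_queue_tid_release() */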
2254 		unsigned int chan = (uintptr_t)p & 3;
2255 		p = (void *)p - chan;
2256 
2257 		adap->tid_release_head = *p;
2258 		*p = NULL;
2259 		spin_unlock_bh(&adap->tid_release_lock);
2260 
2261 		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2262 					 GFP_KERNEL)))
2263 			schedule_timeout_uninterruptible(1);
2264 
2265 		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2266 		t4_ofld_send(adap, skb);
2267 		spin_lock_bh(&adap->tid_release_lock);
2268 	}
2269 	adap->tid_release_task_busy = false;
2270 	spin_unlock_bh(&adap->tid_release_lock);
2271 }
2272 
2273 /*
2274  * Release a TID and inform HW.  If we are unable to allocate the release
2275  * message we defer to a work queue.
2276  */
2277 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2278 {
2279 	void *old;
2280 	struct sk_buff *skb;
2281 	struct adapter *adap = container_of(t, struct adapter, tids);
2282 
2283 	old = t->tid_tab[tid];
2284 	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2285 	if (likely(skb)) {
2286 		t->tid_tab[tid] = NULL;
2287 		mk_tid_release(skb, chan, tid);
2288 		t4_ofld_send(adap, skb);
2289 	} else
2290 		cxgb4_queue_tid_release(t, chan, tid);
2291 	if (old)
2292 		atomic_dec(&t->tids_in_use);
2293 }
2294 EXPORT_SYMBOL(cxgb4_remove_tid);
2295 
2296 /*
2297  * Allocate and initialize the TID tables.  Returns 0 on success.
2298  */
2299 static int tid_init(struct tid_info *t)
2300 {
2301 	size_t size;
2302 	unsigned int natids = t->natids;
2303 
2304 	size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
2305 	       t->nstids * sizeof(*t->stid_tab) +
2306 	       BITS_TO_LONGS(t->nstids) * sizeof(long);
2307 	t->tid_tab = t4_alloc_mem(size);
2308 	if (!t->tid_tab)
2309 		return -ENOMEM;
2310 
2311 	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2312 	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2313 	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
2314 	spin_lock_init(&t->stid_lock);
2315 	spin_lock_init(&t->atid_lock);
2316 
2317 	t->stids_in_use = 0;
2318 	t->afree = NULL;
2319 	t->atids_in_use = 0;
2320 	atomic_set(&t->tids_in_use, 0);
2321 
2322 	/* Setup the free list for atid_tab and clear the stid bitmap. */
2323 	if (natids) {
2324 		while (--natids)
2325 			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2326 		t->afree = t->atid_tab;
2327 	}
2328 	bitmap_zero(t->stid_bmap, t->nstids);
2329 	return 0;
2330 }
2331 
2332 /**
2333  *	cxgb4_create_server - create an IP server
2334  *	@dev: the device
2335  *	@stid: the server TID
2336  *	@sip: local IP address to bind server to
2337  *	@sport: the server's TCP port
2338  *	@queue: queue to direct messages from this server to
2339  *
2340  *	Create an IP server for the given port and address.
2341  *	Returns <0 on error and one of the %NET_XMIT_* values on success.
2342  */
2343 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2344 			__be32 sip, __be16 sport, unsigned int queue)
2345 {
2346 	unsigned int chan;
2347 	struct sk_buff *skb;
2348 	struct adapter *adap;
2349 	struct cpl_pass_open_req *req;
2350 
2351 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2352 	if (!skb)
2353 		return -ENOMEM;
2354 
2355 	adap = netdev2adap(dev);
2356 	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2357 	INIT_TP_WR(req, 0);
2358 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2359 	req->local_port = sport;
2360 	req->peer_port = htons(0);
2361 	req->local_ip = sip;
2362 	req->peer_ip = htonl(0);
2363 	chan = rxq_to_chan(&adap->sge, queue);
2364 	req->opt0 = cpu_to_be64(TX_CHAN(chan));
2365 	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2366 				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2367 	return t4_mgmt_tx(adap, skb);
2368 }
2369 EXPORT_SYMBOL(cxgb4_create_server);
2370 
2371 /**
2372  *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2373  *	@mtus: the HW MTU table
2374  *	@mtu: the target MTU
2375  *	@idx: index of selected entry in the MTU table
2376  *
2377  *	Returns the index and the value in the HW MTU table that is closest to
2378  *	but does not exceed @mtu, unless @mtu is smaller than any value in the
2379  *	table, in which case that smallest available value is selected.
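 *
 *	A typical (hypothetical) use by a caller holding the adapter's MTU
 *	table might look like:
 *
 *		unsigned int idx;
 *		unsigned int mtu = cxgb4_best_mtu(adap->params.mtus, 1500, &idx);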
2380  */
2381 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2382 			    unsigned int *idx)
2383 {
2384 	unsigned int i = 0;
2385 
2386 	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2387 		++i;
2388 	if (idx)
2389 		*idx = i;
2390 	return mtus[i];
2391 }
2392 EXPORT_SYMBOL(cxgb4_best_mtu);
2393 
2394 /**
2395  *	cxgb4_port_chan - get the HW channel of a port
2396  *	@dev: the net device for the port
2397  *
2398  *	Return the HW Tx channel of the given port.
2399  */
2400 unsigned int cxgb4_port_chan(const struct net_device *dev)
2401 {
2402 	return netdev2pinfo(dev)->tx_chan;
2403 }
2404 EXPORT_SYMBOL(cxgb4_port_chan);
2405 
2406 /**
2407  *	cxgb4_port_viid - get the VI id of a port
2408  *	@dev: the net device for the port
2409  *
2410  *	Return the VI id of the given port.
2411  */
2412 unsigned int cxgb4_port_viid(const struct net_device *dev)
2413 {
2414 	return netdev2pinfo(dev)->viid;
2415 }
2416 EXPORT_SYMBOL(cxgb4_port_viid);
2417 
2418 /**
2419  *	cxgb4_port_idx - get the index of a port
2420  *	@dev: the net device for the port
2421  *
2422  *	Return the index of the given port.
2423  */
2424 unsigned int cxgb4_port_idx(const struct net_device *dev)
2425 {
2426 	return netdev2pinfo(dev)->port_id;
2427 }
2428 EXPORT_SYMBOL(cxgb4_port_idx);
2429 
2430 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2431 			 struct tp_tcp_stats *v6)
2432 {
2433 	struct adapter *adap = pci_get_drvdata(pdev);
2434 
2435 	spin_lock(&adap->stats_lock);
2436 	t4_tp_get_tcp_stats(adap, v4, v6);
2437 	spin_unlock(&adap->stats_lock);
2438 }
2439 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2440 
2441 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2442 		      const unsigned int *pgsz_order)
2443 {
2444 	struct adapter *adap = netdev2adap(dev);
2445 
2446 	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2447 	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2448 		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2449 		     HPZ3(pgsz_order[3]));
2450 }
2451 EXPORT_SYMBOL(cxgb4_iscsi_init);
2452 
2453 static struct pci_driver cxgb4_driver;
2454 
2455 static void check_neigh_update(struct neighbour *neigh)
2456 {
2457 	const struct device *parent;
2458 	const struct net_device *netdev = neigh->dev;
2459 
2460 	if (netdev->priv_flags & IFF_802_1Q_VLAN)
2461 		netdev = vlan_dev_real_dev(netdev);
2462 	parent = netdev->dev.parent;
2463 	if (parent && parent->driver == &cxgb4_driver.driver)
2464 		t4_l2t_update(dev_get_drvdata(parent), neigh);
2465 }
2466 
2467 static int netevent_cb(struct notifier_block *nb, unsigned long event,
2468 		       void *data)
2469 {
2470 	switch (event) {
2471 	case NETEVENT_NEIGH_UPDATE:
2472 		check_neigh_update(data);
2473 		break;
2474 	case NETEVENT_REDIRECT:
2475 	default:
2476 		break;
2477 	}
2478 	return 0;
2479 }
2480 
2481 static bool netevent_registered;
2482 static struct notifier_block cxgb4_netevent_nb = {
2483 	.notifier_call = netevent_cb
2484 };
2485 
2486 static void uld_attach(struct adapter *adap, unsigned int uld)
2487 {
2488 	void *handle;
2489 	struct cxgb4_lld_info lli;
2490 
2491 	lli.pdev = adap->pdev;
2492 	lli.l2t = adap->l2t;
2493 	lli.tids = &adap->tids;
2494 	lli.ports = adap->port;
2495 	lli.vr = &adap->vres;
2496 	lli.mtus = adap->params.mtus;
2497 	if (uld == CXGB4_ULD_RDMA) {
2498 		lli.rxq_ids = adap->sge.rdma_rxq;
2499 		lli.nrxq = adap->sge.rdmaqs;
2500 	} else if (uld == CXGB4_ULD_ISCSI) {
2501 		lli.rxq_ids = adap->sge.ofld_rxq;
2502 		lli.nrxq = adap->sge.ofldqsets;
2503 	}
2504 	lli.ntxq = adap->sge.ofldqsets;
2505 	lli.nchan = adap->params.nports;
2506 	lli.nports = adap->params.nports;
2507 	lli.wr_cred = adap->params.ofldq_wr_cred;
2508 	lli.adapter_type = adap->params.rev;
2509 	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
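	/*
	 * The queues-per-page registers hold a 4-bit field per PF; shift this
	 * function's field down into the PF0 position before extracting it.
	 */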
2510 	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
2511 			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
2512 			(adap->fn * 4));
2513 	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
2514 			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
2515 			(adap->fn * 4));
2516 	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2517 	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2518 	lli.fw_vers = adap->params.fw_vers;
2519 
2520 	handle = ulds[uld].add(&lli);
2521 	if (IS_ERR(handle)) {
2522 		dev_warn(adap->pdev_dev,
2523 			 "could not attach to the %s driver, error %ld\n",
2524 			 uld_str[uld], PTR_ERR(handle));
2525 		return;
2526 	}
2527 
2528 	adap->uld_handle[uld] = handle;
2529 
2530 	if (!netevent_registered) {
2531 		register_netevent_notifier(&cxgb4_netevent_nb);
2532 		netevent_registered = true;
2533 	}
2534 
2535 	if (adap->flags & FULL_INIT_DONE)
2536 		ulds[uld].state_change(handle, CXGB4_STATE_UP);
2537 }
2538 
2539 static void attach_ulds(struct adapter *adap)
2540 {
2541 	unsigned int i;
2542 
2543 	mutex_lock(&uld_mutex);
2544 	list_add_tail(&adap->list_node, &adapter_list);
2545 	for (i = 0; i < CXGB4_ULD_MAX; i++)
2546 		if (ulds[i].add)
2547 			uld_attach(adap, i);
2548 	mutex_unlock(&uld_mutex);
2549 }
2550 
2551 static void detach_ulds(struct adapter *adap)
2552 {
2553 	unsigned int i;
2554 
2555 	mutex_lock(&uld_mutex);
2556 	list_del(&adap->list_node);
2557 	for (i = 0; i < CXGB4_ULD_MAX; i++)
2558 		if (adap->uld_handle[i]) {
2559 			ulds[i].state_change(adap->uld_handle[i],
2560 					     CXGB4_STATE_DETACH);
2561 			adap->uld_handle[i] = NULL;
2562 		}
2563 	if (netevent_registered && list_empty(&adapter_list)) {
2564 		unregister_netevent_notifier(&cxgb4_netevent_nb);
2565 		netevent_registered = false;
2566 	}
2567 	mutex_unlock(&uld_mutex);
2568 }
2569 
2570 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2571 {
2572 	unsigned int i;
2573 
2574 	mutex_lock(&uld_mutex);
2575 	for (i = 0; i < CXGB4_ULD_MAX; i++)
2576 		if (adap->uld_handle[i])
2577 			ulds[i].state_change(adap->uld_handle[i], new_state);
2578 	mutex_unlock(&uld_mutex);
2579 }
2580 
2581 /**
2582  *	cxgb4_register_uld - register an upper-layer driver
2583  *	@type: the ULD type
2584  *	@p: the ULD methods
2585  *
2586  *	Registers an upper-layer driver with this driver and notifies the ULD
2587  *	about any presently available devices that support its type.  Returns
2588  *	%-EBUSY if a ULD of the same type is already registered.
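 *
 *	A minimal sketch of a caller (the names my_add, my_state_change and
 *	my_uld_info are hypothetical; see struct cxgb4_uld_info for the full
 *	set of methods):
 *
 *		static struct cxgb4_uld_info my_uld_info = {
 *			.add          = my_add,
 *			.state_change = my_state_change,
 *		};
 *
 *		cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);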
2589  */
2590 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2591 {
2592 	int ret = 0;
2593 	struct adapter *adap;
2594 
2595 	if (type >= CXGB4_ULD_MAX)
2596 		return -EINVAL;
2597 	mutex_lock(&uld_mutex);
2598 	if (ulds[type].add) {
2599 		ret = -EBUSY;
2600 		goto out;
2601 	}
2602 	ulds[type] = *p;
2603 	list_for_each_entry(adap, &adapter_list, list_node)
2604 		uld_attach(adap, type);
2605 out:	mutex_unlock(&uld_mutex);
2606 	return ret;
2607 }
2608 EXPORT_SYMBOL(cxgb4_register_uld);
2609 
2610 /**
2611  *	cxgb4_unregister_uld - unregister an upper-layer driver
2612  *	@type: the ULD type
2613  *
2614  *	Unregisters an existing upper-layer driver.
2615  */
2616 int cxgb4_unregister_uld(enum cxgb4_uld type)
2617 {
2618 	struct adapter *adap;
2619 
2620 	if (type >= CXGB4_ULD_MAX)
2621 		return -EINVAL;
2622 	mutex_lock(&uld_mutex);
2623 	list_for_each_entry(adap, &adapter_list, list_node)
2624 		adap->uld_handle[type] = NULL;
2625 	ulds[type].add = NULL;
2626 	mutex_unlock(&uld_mutex);
2627 	return 0;
2628 }
2629 EXPORT_SYMBOL(cxgb4_unregister_uld);
2630 
2631 /**
2632  *	cxgb_up - enable the adapter
2633  *	@adap: adapter being enabled
2634  *
2635  *	Called when the first port is enabled, this function performs the
2636  *	actions necessary to make an adapter operational, such as completing
2637  *	the initialization of HW modules, and enabling interrupts.
2638  *
2639  *	Must be called with the rtnl lock held.
2640  */
2641 static int cxgb_up(struct adapter *adap)
2642 {
2643 	int err;
2644 
2645 	err = setup_sge_queues(adap);
2646 	if (err)
2647 		goto out;
2648 	err = setup_rss(adap);
2649 	if (err)
2650 		goto freeq;
2651 
2652 	if (adap->flags & USING_MSIX) {
2653 		name_msix_vecs(adap);
2654 		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2655 				  adap->msix_info[0].desc, adap);
2656 		if (err)
2657 			goto irq_err;
2658 
2659 		err = request_msix_queue_irqs(adap);
2660 		if (err) {
2661 			free_irq(adap->msix_info[0].vec, adap);
2662 			goto irq_err;
2663 		}
2664 	} else {
2665 		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2666 				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2667 				  adap->port[0]->name, adap);
2668 		if (err)
2669 			goto irq_err;
2670 	}
2671 	enable_rx(adap);
2672 	t4_sge_start(adap);
2673 	t4_intr_enable(adap);
2674 	adap->flags |= FULL_INIT_DONE;
2675 	notify_ulds(adap, CXGB4_STATE_UP);
2676  out:
2677 	return err;
2678  irq_err:
2679 	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2680  freeq:
2681 	t4_free_sge_resources(adap);
2682 	goto out;
2683 }
2684 
2685 static void cxgb_down(struct adapter *adapter)
2686 {
2687 	t4_intr_disable(adapter);
2688 	cancel_work_sync(&adapter->tid_release_task);
2689 	adapter->tid_release_task_busy = false;
2690 	adapter->tid_release_head = NULL;
2691 
2692 	if (adapter->flags & USING_MSIX) {
2693 		free_msix_queue_irqs(adapter);
2694 		free_irq(adapter->msix_info[0].vec, adapter);
2695 	} else
2696 		free_irq(adapter->pdev->irq, adapter);
2697 	quiesce_rx(adapter);
2698 	t4_sge_stop(adapter);
2699 	t4_free_sge_resources(adapter);
2700 	adapter->flags &= ~FULL_INIT_DONE;
2701 }
2702 
2703 /*
2704  * net_device operations
2705  */
2706 static int cxgb_open(struct net_device *dev)
2707 {
2708 	int err;
2709 	struct port_info *pi = netdev_priv(dev);
2710 	struct adapter *adapter = pi->adapter;
2711 
2712 	netif_carrier_off(dev);
2713 
2714 	if (!(adapter->flags & FULL_INIT_DONE)) {
2715 		err = cxgb_up(adapter);
2716 		if (err < 0)
2717 			return err;
2718 	}
2719 
2720 	err = link_start(dev);
2721 	if (!err)
2722 		netif_tx_start_all_queues(dev);
2723 	return err;
2724 }
2725 
2726 static int cxgb_close(struct net_device *dev)
2727 {
2728 	struct port_info *pi = netdev_priv(dev);
2729 	struct adapter *adapter = pi->adapter;
2730 
2731 	netif_tx_stop_all_queues(dev);
2732 	netif_carrier_off(dev);
2733 	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
2734 }
2735 
2736 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2737 						struct rtnl_link_stats64 *ns)
2738 {
2739 	struct port_stats stats;
2740 	struct port_info *p = netdev_priv(dev);
2741 	struct adapter *adapter = p->adapter;
2742 
2743 	spin_lock(&adapter->stats_lock);
2744 	t4_get_port_stats(adapter, p->tx_chan, &stats);
2745 	spin_unlock(&adapter->stats_lock);
2746 
2747 	ns->tx_bytes   = stats.tx_octets;
2748 	ns->tx_packets = stats.tx_frames;
2749 	ns->rx_bytes   = stats.rx_octets;
2750 	ns->rx_packets = stats.rx_frames;
2751 	ns->multicast  = stats.rx_mcast_frames;
2752 
2753 	/* detailed rx_errors */
2754 	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2755 			       stats.rx_runt;
2756 	ns->rx_over_errors   = 0;
2757 	ns->rx_crc_errors    = stats.rx_fcs_err;
2758 	ns->rx_frame_errors  = stats.rx_symbol_err;
2759 	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
2760 			       stats.rx_ovflow2 + stats.rx_ovflow3 +
2761 			       stats.rx_trunc0 + stats.rx_trunc1 +
2762 			       stats.rx_trunc2 + stats.rx_trunc3;
2763 	ns->rx_missed_errors = 0;
2764 
2765 	/* detailed tx_errors */
2766 	ns->tx_aborted_errors   = 0;
2767 	ns->tx_carrier_errors   = 0;
2768 	ns->tx_fifo_errors      = 0;
2769 	ns->tx_heartbeat_errors = 0;
2770 	ns->tx_window_errors    = 0;
2771 
2772 	ns->tx_errors = stats.tx_error_frames;
2773 	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2774 		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2775 	return ns;
2776 }
2777 
2778 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2779 {
2780 	unsigned int mbox;
2781 	int ret = 0, prtad, devad;
2782 	struct port_info *pi = netdev_priv(dev);
2783 	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2784 
2785 	switch (cmd) {
2786 	case SIOCGMIIPHY:
2787 		if (pi->mdio_addr < 0)
2788 			return -EOPNOTSUPP;
2789 		data->phy_id = pi->mdio_addr;
2790 		break;
2791 	case SIOCGMIIREG:
2792 	case SIOCSMIIREG:
2793 		if (mdio_phy_id_is_c45(data->phy_id)) {
2794 			prtad = mdio_phy_id_prtad(data->phy_id);
2795 			devad = mdio_phy_id_devad(data->phy_id);
2796 		} else if (data->phy_id < 32) {
2797 			prtad = data->phy_id;
2798 			devad = 0;
2799 			data->reg_num &= 0x1f;
2800 		} else
2801 			return -EINVAL;
2802 
2803 		mbox = pi->adapter->fn;
2804 		if (cmd == SIOCGMIIREG)
2805 			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2806 					 data->reg_num, &data->val_out);
2807 		else
2808 			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2809 					 data->reg_num, data->val_in);
2810 		break;
2811 	default:
2812 		return -EOPNOTSUPP;
2813 	}
2814 	return ret;
2815 }
2816 
2817 static void cxgb_set_rxmode(struct net_device *dev)
2818 {
2819 	/* unfortunately we can't return errors to the stack */
2820 	set_rxmode(dev, -1, false);
2821 }
2822 
2823 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2824 {
2825 	int ret;
2826 	struct port_info *pi = netdev_priv(dev);
2827 
2828 	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
2829 		return -EINVAL;
2830 	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
2831 			    -1, -1, -1, true);
2832 	if (!ret)
2833 		dev->mtu = new_mtu;
2834 	return ret;
2835 }
2836 
2837 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2838 {
2839 	int ret;
2840 	struct sockaddr *addr = p;
2841 	struct port_info *pi = netdev_priv(dev);
2842 
2843 	if (!is_valid_ether_addr(addr->sa_data))
2844 		return -EINVAL;
2845 
2846 	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
2847 			    pi->xact_addr_filt, addr->sa_data, true, true);
2848 	if (ret < 0)
2849 		return ret;
2850 
2851 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2852 	pi->xact_addr_filt = ret;
2853 	return 0;
2854 }
2855 
2856 #ifdef CONFIG_NET_POLL_CONTROLLER
2857 static void cxgb_netpoll(struct net_device *dev)
2858 {
2859 	struct port_info *pi = netdev_priv(dev);
2860 	struct adapter *adap = pi->adapter;
2861 
2862 	if (adap->flags & USING_MSIX) {
2863 		int i;
2864 		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2865 
2866 		for (i = pi->nqsets; i; i--, rx++)
2867 			t4_sge_intr_msix(0, &rx->rspq);
2868 	} else
2869 		t4_intr_handler(adap)(0, adap);
2870 }
2871 #endif
2872 
2873 static const struct net_device_ops cxgb4_netdev_ops = {
2874 	.ndo_open             = cxgb_open,
2875 	.ndo_stop             = cxgb_close,
2876 	.ndo_start_xmit       = t4_eth_xmit,
2877 	.ndo_get_stats64      = cxgb_get_stats,
2878 	.ndo_set_rx_mode      = cxgb_set_rxmode,
2879 	.ndo_set_mac_address  = cxgb_set_mac_addr,
2880 	.ndo_validate_addr    = eth_validate_addr,
2881 	.ndo_do_ioctl         = cxgb_ioctl,
2882 	.ndo_change_mtu       = cxgb_change_mtu,
2883 #ifdef CONFIG_NET_POLL_CONTROLLER
2884 	.ndo_poll_controller  = cxgb_netpoll,
2885 #endif
2886 };
2887 
2888 void t4_fatal_err(struct adapter *adap)
2889 {
2890 	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
2891 	t4_intr_disable(adap);
2892 	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
2893 }
2894 
2895 static void setup_memwin(struct adapter *adap)
2896 {
2897 	u32 bar0;
2898 
2899 	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
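	/*
	 * Each window is programmed with its base address and a size field
	 * encoding log2 of the aperture in KB, hence the "- 10" below.
	 */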
2900 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
2901 		     (bar0 + MEMWIN0_BASE) | BIR(0) |
2902 		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
2903 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
2904 		     (bar0 + MEMWIN1_BASE) | BIR(0) |
2905 		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
2906 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
2907 		     (bar0 + MEMWIN2_BASE) | BIR(0) |
2908 		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
2909 	if (adap->vres.ocq.size) {
2910 		unsigned int start, sz_kb;
2911 
2912 		start = pci_resource_start(adap->pdev, 2) +
2913 			OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
2914 		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
2915 		t4_write_reg(adap,
2916 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
2917 			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
2918 		t4_write_reg(adap,
2919 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
2920 			     adap->vres.ocq.start);
2921 		t4_read_reg(adap,
2922 			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
2923 	}
2924 }
2925 
2926 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
2927 {
2928 	u32 v;
2929 	int ret;
2930 
2931 	/* get device capabilities */
2932 	memset(c, 0, sizeof(*c));
2933 	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2934 			       FW_CMD_REQUEST | FW_CMD_READ);
2935 	c->retval_len16 = htonl(FW_LEN16(*c));
2936 	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
2937 	if (ret < 0)
2938 		return ret;
2939 
2940 	/* select capabilities we'll be using */
2941 	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
2942 		if (!vf_acls)
2943 			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
2944 		else
2945 			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
2946 	} else if (vf_acls) {
2947 		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
2948 		return -EINVAL;
2949 	}
2950 	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2951 			       FW_CMD_REQUEST | FW_CMD_WRITE);
2952 	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
2953 	if (ret < 0)
2954 		return ret;
2955 
2956 	ret = t4_config_glbl_rss(adap, adap->fn,
2957 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
2958 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
2959 				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
2960 	if (ret < 0)
2961 		return ret;
2962 
2963 	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
2964 			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
2965 	if (ret < 0)
2966 		return ret;
2967 
2968 	t4_sge_init(adap);
2969 
2970 	/* tweak some settings */
2971 	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
2972 	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
2973 	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
2974 	v = t4_read_reg(adap, TP_PIO_DATA);
2975 	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
2976 
2977 	/* get basic stuff going */
2978 	return t4_early_init(adap, adap->fn);
2979 }
2980 
2981 /*
2982  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
2983  */
2984 #define MAX_ATIDS 8192U
2985 
2986 /*
2987  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
2988  */
2989 static int adap_init0(struct adapter *adap)
2990 {
2991 	int ret;
2992 	u32 v, port_vec;
2993 	enum dev_state state;
2994 	u32 params[7], val[7];
2995 	struct fw_caps_config_cmd c;
2996 
2997 	ret = t4_check_fw_version(adap);
2998 	if (ret == -EINVAL || ret > 0) {
2999 		if (upgrade_fw(adap) >= 0)             /* recache FW version */
3000 			ret = t4_check_fw_version(adap);
3001 	}
3002 	if (ret < 0)
3003 		return ret;
3004 
3005 	/* contact FW, request master */
3006 	ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
3007 	if (ret < 0) {
3008 		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3009 			ret);
3010 		return ret;
3011 	}
3012 
3013 	/* reset device */
3014 	ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
3015 	if (ret < 0)
3016 		goto bye;
3017 
3018 	for (v = 0; v < SGE_NTIMERS - 1; v++)
3019 		adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
3020 	adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
3021 	adap->sge.counter_val[0] = 1;
3022 	for (v = 1; v < SGE_NCOUNTERS; v++)
3023 		adap->sge.counter_val[v] = min(intr_cnt[v - 1],
3024 					       THRESHOLD_3_MASK);
3025 #define FW_PARAM_DEV(param) \
3026 	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3027 	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3028 
3029 	params[0] = FW_PARAM_DEV(CCLK);
3030 	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
3031 	if (ret < 0)
3032 		goto bye;
3033 	adap->params.vpd.cclk = val[0];
3034 
3035 	ret = adap_init1(adap, &c);
3036 	if (ret < 0)
3037 		goto bye;
3038 
3039 #define FW_PARAM_PFVF(param) \
3040 	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3041 	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
3042 	 FW_PARAMS_PARAM_Y(adap->fn))
3043 
3044 	params[0] = FW_PARAM_DEV(PORTVEC);
3045 	params[1] = FW_PARAM_PFVF(L2T_START);
3046 	params[2] = FW_PARAM_PFVF(L2T_END);
3047 	params[3] = FW_PARAM_PFVF(FILTER_START);
3048 	params[4] = FW_PARAM_PFVF(FILTER_END);
3049 	params[5] = FW_PARAM_PFVF(IQFLINT_START);
3050 	params[6] = FW_PARAM_PFVF(EQ_START);
3051 	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
3052 	if (ret < 0)
3053 		goto bye;
3054 	port_vec = val[0];
3055 	adap->tids.ftid_base = val[3];
3056 	adap->tids.nftids = val[4] - val[3] + 1;
3057 	adap->sge.ingr_start = val[5];
3058 	adap->sge.egr_start = val[6];
3059 
3060 	if (c.ofldcaps) {
3061 		/* query offload-related parameters */
3062 		params[0] = FW_PARAM_DEV(NTID);
3063 		params[1] = FW_PARAM_PFVF(SERVER_START);
3064 		params[2] = FW_PARAM_PFVF(SERVER_END);
3065 		params[3] = FW_PARAM_PFVF(TDDP_START);
3066 		params[4] = FW_PARAM_PFVF(TDDP_END);
3067 		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3068 		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3069 				      val);
3070 		if (ret < 0)
3071 			goto bye;
3072 		adap->tids.ntids = val[0];
3073 		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3074 		adap->tids.stid_base = val[1];
3075 		adap->tids.nstids = val[2] - val[1] + 1;
3076 		adap->vres.ddp.start = val[3];
3077 		adap->vres.ddp.size = val[4] - val[3] + 1;
3078 		adap->params.ofldq_wr_cred = val[5];
3079 		adap->params.offload = 1;
3080 	}
3081 	if (c.rdmacaps) {
3082 		params[0] = FW_PARAM_PFVF(STAG_START);
3083 		params[1] = FW_PARAM_PFVF(STAG_END);
3084 		params[2] = FW_PARAM_PFVF(RQ_START);
3085 		params[3] = FW_PARAM_PFVF(RQ_END);
3086 		params[4] = FW_PARAM_PFVF(PBL_START);
3087 		params[5] = FW_PARAM_PFVF(PBL_END);
3088 		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3089 				      val);
3090 		if (ret < 0)
3091 			goto bye;
3092 		adap->vres.stag.start = val[0];
3093 		adap->vres.stag.size = val[1] - val[0] + 1;
3094 		adap->vres.rq.start = val[2];
3095 		adap->vres.rq.size = val[3] - val[2] + 1;
3096 		adap->vres.pbl.start = val[4];
3097 		adap->vres.pbl.size = val[5] - val[4] + 1;
3098 
3099 		params[0] = FW_PARAM_PFVF(SQRQ_START);
3100 		params[1] = FW_PARAM_PFVF(SQRQ_END);
3101 		params[2] = FW_PARAM_PFVF(CQ_START);
3102 		params[3] = FW_PARAM_PFVF(CQ_END);
3103 		params[4] = FW_PARAM_PFVF(OCQ_START);
3104 		params[5] = FW_PARAM_PFVF(OCQ_END);
3105 		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3106 				      val);
3107 		if (ret < 0)
3108 			goto bye;
3109 		adap->vres.qp.start = val[0];
3110 		adap->vres.qp.size = val[1] - val[0] + 1;
3111 		adap->vres.cq.start = val[2];
3112 		adap->vres.cq.size = val[3] - val[2] + 1;
3113 		adap->vres.ocq.start = val[4];
3114 		adap->vres.ocq.size = val[5] - val[4] + 1;
3115 	}
3116 	if (c.iscsicaps) {
3117 		params[0] = FW_PARAM_PFVF(ISCSI_START);
3118 		params[1] = FW_PARAM_PFVF(ISCSI_END);
3119 		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
3120 				      val);
3121 		if (ret < 0)
3122 			goto bye;
3123 		adap->vres.iscsi.start = val[0];
3124 		adap->vres.iscsi.size = val[1] - val[0] + 1;
3125 	}
3126 #undef FW_PARAM_PFVF
3127 #undef FW_PARAM_DEV
3128 
3129 	adap->params.nports = hweight32(port_vec);
3130 	adap->params.portvec = port_vec;
3131 	adap->flags |= FW_OK;
3132 
3133 	/* These are finalized by FW initialization, load their values now */
3134 	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3135 	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3136 	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
3137 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3138 		     adap->params.b_wnd);
3139 
3140 #ifdef CONFIG_PCI_IOV
3141 	/*
3142 	 * Provision resource limits for Virtual Functions.  We currently
3143 	 * grant them all the same static resource limits except for the Port
3144 	 * Access Rights Mask which we're assigning based on the PF.  All of
3145 	 * the static provisioning stuff for both the PF and VF really needs
3146 	 * to be managed in a persistent manner for each device which the
3147 	 * firmware controls.
3148 	 */
3149 	{
3150 		int pf, vf;
3151 
3152 		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3153 			if (num_vf[pf] <= 0)
3154 				continue;
3155 
3156 			/* VF numbering starts at 1! */
3157 			for (vf = 1; vf <= num_vf[pf]; vf++) {
3158 				ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
3159 						  VFRES_NEQ, VFRES_NETHCTRL,
3160 						  VFRES_NIQFLINT, VFRES_NIQ,
3161 						  VFRES_TC, VFRES_NVI,
3162 						  FW_PFVF_CMD_CMASK_MASK,
3163 						  pfvfres_pmask(adap, pf, vf),
3164 						  VFRES_NEXACTF,
3165 						  VFRES_R_CAPS, VFRES_WX_CAPS);
3166 				if (ret < 0)
3167 					dev_warn(adap->pdev_dev, "failed to "
3168 						 "provision pf/vf=%d/%d; "
3169 						 "err=%d\n", pf, vf, ret);
3170 			}
3171 		}
3172 	}
3173 #endif
3174 
3175 	setup_memwin(adap);
3176 	return 0;
3177 
3178 	/*
3179 	 * If a command timed out or failed with EIO, the FW is not operating
3180 	 * within its spec or something catastrophic happened to the HW/FW;
3181 	 * stop issuing commands.
3182 	 */
3183 bye:	if (ret != -ETIMEDOUT && ret != -EIO)
3184 		t4_fw_bye(adap, adap->fn);
3185 	return ret;
3186 }
3187 
3188 /* EEH callbacks */
3189 
3190 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3191 					 pci_channel_state_t state)
3192 {
3193 	int i;
3194 	struct adapter *adap = pci_get_drvdata(pdev);
3195 
3196 	if (!adap)
3197 		goto out;
3198 
3199 	rtnl_lock();
3200 	adap->flags &= ~FW_OK;
3201 	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
3202 	for_each_port(adap, i) {
3203 		struct net_device *dev = adap->port[i];
3204 
3205 		netif_device_detach(dev);
3206 		netif_carrier_off(dev);
3207 	}
3208 	if (adap->flags & FULL_INIT_DONE)
3209 		cxgb_down(adap);
3210 	rtnl_unlock();
3211 	pci_disable_device(pdev);
3212 out:	return state == pci_channel_io_perm_failure ?
3213 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
3214 }
3215 
3216 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
3217 {
3218 	int i, ret;
3219 	struct fw_caps_config_cmd c;
3220 	struct adapter *adap = pci_get_drvdata(pdev);
3221 
3222 	if (!adap) {
3223 		pci_restore_state(pdev);
3224 		pci_save_state(pdev);
3225 		return PCI_ERS_RESULT_RECOVERED;
3226 	}
3227 
3228 	if (pci_enable_device(pdev)) {
3229 		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
3230 		return PCI_ERS_RESULT_DISCONNECT;
3231 	}
3232 
3233 	pci_set_master(pdev);
3234 	pci_restore_state(pdev);
3235 	pci_save_state(pdev);
3236 	pci_cleanup_aer_uncorrect_error_status(pdev);
3237 
3238 	if (t4_wait_dev_ready(adap) < 0)
3239 		return PCI_ERS_RESULT_DISCONNECT;
3240 	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
3241 		return PCI_ERS_RESULT_DISCONNECT;
3242 	adap->flags |= FW_OK;
3243 	if (adap_init1(adap, &c))
3244 		return PCI_ERS_RESULT_DISCONNECT;
3245 
3246 	for_each_port(adap, i) {
3247 		struct port_info *p = adap2pinfo(adap, i);
3248 
3249 		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
3250 				  NULL, NULL);
3251 		if (ret < 0)
3252 			return PCI_ERS_RESULT_DISCONNECT;
3253 		p->viid = ret;
3254 		p->xact_addr_filt = -1;
3255 	}
3256 
3257 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3258 		     adap->params.b_wnd);
3259 	setup_memwin(adap);
3260 	if (cxgb_up(adap))
3261 		return PCI_ERS_RESULT_DISCONNECT;
3262 	return PCI_ERS_RESULT_RECOVERED;
3263 }
3264 
3265 static void eeh_resume(struct pci_dev *pdev)
3266 {
3267 	int i;
3268 	struct adapter *adap = pci_get_drvdata(pdev);
3269 
3270 	if (!adap)
3271 		return;
3272 
3273 	rtnl_lock();
3274 	for_each_port(adap, i) {
3275 		struct net_device *dev = adap->port[i];
3276 
3277 		if (netif_running(dev)) {
3278 			link_start(dev);
3279 			cxgb_set_rxmode(dev);
3280 		}
3281 		netif_device_attach(dev);
3282 	}
3283 	rtnl_unlock();
3284 }
3285 
3286 static struct pci_error_handlers cxgb4_eeh = {
3287 	.error_detected = eeh_err_detected,
3288 	.slot_reset     = eeh_slot_reset,
3289 	.resume         = eeh_resume,
3290 };
3291 
3292 static inline bool is_10g_port(const struct link_config *lc)
3293 {
3294 	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
3295 }
3296 
3297 static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
3298 			     unsigned int size, unsigned int iqe_size)
3299 {
3300 	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
3301 			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
3302 	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
3303 	q->iqe_len = iqe_size;
3304 	q->size = size;
3305 }
3306 
3307 /*
3308  * Perform default configuration of DMA queues depending on the number and type
3309  * of ports we found and the number of available CPUs.  Most settings can be
3310  * modified by the admin prior to actual use.
3311  */
3312 static void __devinit cfg_queues(struct adapter *adap)
3313 {
3314 	struct sge *s = &adap->sge;
3315 	int i, q10g = 0, n10g = 0, qidx = 0;
3316 
3317 	for_each_port(adap, i)
3318 		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
3319 
3320 	/*
3321 	 * We default to 1 queue per non-10G port and up to as many queues as
3322 	 * there are CPU cores per 10G port.
3323 	 */
3324 	if (n10g)
3325 		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
3326 	if (q10g > num_online_cpus())
3327 		q10g = num_online_cpus();
3328 
3329 	for_each_port(adap, i) {
3330 		struct port_info *pi = adap2pinfo(adap, i);
3331 
3332 		pi->first_qset = qidx;
3333 		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
3334 		qidx += pi->nqsets;
3335 	}
3336 
3337 	s->ethqsets = qidx;
3338 	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
3339 
3340 	if (is_offload(adap)) {
3341 		/*
3342 		 * For offload we use 1 queue/channel if all ports are up to 1G,
3343 		 * otherwise we divide all available queues amongst the channels
3344 		 * capped by the number of available cores.
3345 		 */
3346 		if (n10g) {
3347 			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
3348 				  num_online_cpus());
3349 			s->ofldqsets = roundup(i, adap->params.nports);
3350 		} else
3351 			s->ofldqsets = adap->params.nports;
3352 		/* For RDMA one Rx queue per channel suffices */
3353 		s->rdmaqs = adap->params.nports;
3354 	}
3355 
3356 	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
3357 		struct sge_eth_rxq *r = &s->ethrxq[i];
3358 
3359 		init_rspq(&r->rspq, 0, 0, 1024, 64);
3360 		r->fl.size = 72;
3361 	}
3362 
3363 	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
3364 		s->ethtxq[i].q.size = 1024;
3365 
3366 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
3367 		s->ctrlq[i].q.size = 512;
3368 
3369 	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
3370 		s->ofldtxq[i].q.size = 1024;
3371 
3372 	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
3373 		struct sge_ofld_rxq *r = &s->ofldrxq[i];
3374 
3375 		init_rspq(&r->rspq, 0, 0, 1024, 64);
3376 		r->rspq.uld = CXGB4_ULD_ISCSI;
3377 		r->fl.size = 72;
3378 	}
3379 
3380 	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
3381 		struct sge_ofld_rxq *r = &s->rdmarxq[i];
3382 
3383 		init_rspq(&r->rspq, 0, 0, 511, 64);
3384 		r->rspq.uld = CXGB4_ULD_RDMA;
3385 		r->fl.size = 72;
3386 	}
3387 
3388 	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
3389 	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
3390 }
3391 
3392 /*
3393  * Reduce the number of Ethernet queues across all ports to at most n.
3394  * n provides at least one queue per port.
3395  */
3396 static void __devinit reduce_ethqs(struct adapter *adap, int n)
3397 {
3398 	int i;
3399 	struct port_info *pi;
3400 
3401 	while (n < adap->sge.ethqsets)
3402 		for_each_port(adap, i) {
3403 			pi = adap2pinfo(adap, i);
3404 			if (pi->nqsets > 1) {
3405 				pi->nqsets--;
3406 				adap->sge.ethqsets--;
3407 				if (adap->sge.ethqsets <= n)
3408 					break;
3409 			}
3410 		}
3411 
3412 	n = 0;
3413 	for_each_port(adap, i) {
3414 		pi = adap2pinfo(adap, i);
3415 		pi->first_qset = n;
3416 		n += pi->nqsets;
3417 	}
3418 }
3419 
3420 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
3421 #define EXTRA_VECS 2
3422 
3423 static int __devinit enable_msix(struct adapter *adap)
3424 {
3425 	int ofld_need = 0;
3426 	int i, err, want, need;
3427 	struct sge *s = &adap->sge;
3428 	unsigned int nchan = adap->params.nports;
3429 	struct msix_entry entries[MAX_INGQ + 1];
3430 
3431 	for (i = 0; i < ARRAY_SIZE(entries); ++i)
3432 		entries[i].entry = i;
3433 
3434 	want = s->max_ethqsets + EXTRA_VECS;
3435 	if (is_offload(adap)) {
3436 		want += s->rdmaqs + s->ofldqsets;
3437 		/* need nchan for each possible ULD */
3438 		ofld_need = 2 * nchan;
3439 	}
3440 	need = adap->params.nports + EXTRA_VECS + ofld_need;
3441 
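	/*
	 * pci_enable_msix() returns the number of vectors actually available
	 * when it cannot satisfy the request, so keep retrying with that
	 * smaller count while it still covers the minimum we need.
	 */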
3442 	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
3443 		want = err;
3444 
3445 	if (!err) {
3446 		/*
3447 		 * Distribute available vectors to the various queue groups.
3448 		 * Every group gets its minimum requirement and NIC gets top
3449 		 * priority for leftovers.
3450 		 */
3451 		i = want - EXTRA_VECS - ofld_need;
3452 		if (i < s->max_ethqsets) {
3453 			s->max_ethqsets = i;
3454 			if (i < s->ethqsets)
3455 				reduce_ethqs(adap, i);
3456 		}
3457 		if (is_offload(adap)) {
3458 			i = want - EXTRA_VECS - s->max_ethqsets;
3459 			i -= ofld_need - nchan;
3460 			s->ofldqsets = (i / nchan) * nchan;  /* round down */
3461 		}
3462 		for (i = 0; i < want; ++i)
3463 			adap->msix_info[i].vec = entries[i].vector;
3464 	} else if (err > 0)
3465 		dev_info(adap->pdev_dev,
3466 			 "only %d MSI-X vectors left, not using MSI-X\n", err);
3467 	return err;
3468 }
3469 
3470 #undef EXTRA_VECS
3471 
3472 static int __devinit init_rss(struct adapter *adap)
3473 {
3474 	unsigned int i, j;
3475 
3476 	for_each_port(adap, i) {
3477 		struct port_info *pi = adap2pinfo(adap, i);
3478 
3479 		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
3480 		if (!pi->rss)
3481 			return -ENOMEM;
3482 		for (j = 0; j < pi->rss_size; j++)
3483 			pi->rss[j] = j % pi->nqsets;
3484 	}
3485 	return 0;
3486 }
3487 
3488 static void __devinit print_port_info(const struct net_device *dev)
3489 {
3490 	static const char *base[] = {
3491 		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
3492 		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
3493 	};
3494 
3495 	char buf[80];
3496 	char *bufp = buf;
3497 	const char *spd = "";
3498 	const struct port_info *pi = netdev_priv(dev);
3499 	const struct adapter *adap = pi->adapter;
3500 
3501 	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
3502 		spd = " 2.5 GT/s";
3503 	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
3504 		spd = " 5 GT/s";
3505 
3506 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
3507 		bufp += sprintf(bufp, "100/");
3508 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
3509 		bufp += sprintf(bufp, "1000/");
3510 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3511 		bufp += sprintf(bufp, "10G/");
3512 	if (bufp != buf)
3513 		--bufp;
3514 	sprintf(bufp, "BASE-%s", base[pi->port_type]);
3515 
3516 	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
3517 		    adap->params.vpd.id, adap->params.rev, buf,
3518 		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
3519 		    (adap->flags & USING_MSIX) ? " MSI-X" :
3520 		    (adap->flags & USING_MSI) ? " MSI" : "");
3521 	netdev_info(dev, "S/N: %s, E/C: %s\n",
3522 		    adap->params.vpd.sn, adap->params.vpd.ec);
3523 }
3524 
3525 static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
3526 {
3527 	u16 v;
3528 	int pos;
3529 
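	/*
	 * pci_pcie_cap() returns the offset of the PCI Express capability
	 * structure, or 0 if the device isn't PCIe.  Setting the Relaxed
	 * Ordering Enable bit in Device Control lets the device use
	 * relaxed-ordering transactions for its DMA traffic.
	 */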
3530 	pos = pci_pcie_cap(dev);
3531 	if (pos > 0) {
3532 		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
3533 		v |= PCI_EXP_DEVCTL_RELAX_EN;
3534 		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
3535 	}
3536 }
3537 
3538 /*
3539  * Free the following resources:
3540  * - memory used for tables
3541  * - MSI/MSI-X
3542  * - net devices
3543  * - resources FW is holding for us
3544  */
3545 static void free_some_resources(struct adapter *adapter)
3546 {
3547 	unsigned int i;
3548 
3549 	t4_free_mem(adapter->l2t);
3550 	t4_free_mem(adapter->tids.tid_tab);
3551 	disable_msi(adapter);
3552 
3553 	for_each_port(adapter, i)
3554 		if (adapter->port[i]) {
3555 			kfree(adap2pinfo(adapter, i)->rss);
3556 			free_netdev(adapter->port[i]);
3557 		}
3558 	if (adapter->flags & FW_OK)
3559 		t4_fw_bye(adapter, adapter->fn);
3560 }
3561 
3562 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3563 		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3564 
3565 static int __devinit init_one(struct pci_dev *pdev,
3566 			      const struct pci_device_id *ent)
3567 {
3568 	int func, i, err;
3569 	struct port_info *pi;
3570 	unsigned int highdma = 0;
3571 	struct adapter *adapter = NULL;
3572 
3573 	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3574 
3575 	err = pci_request_regions(pdev, KBUILD_MODNAME);
3576 	if (err) {
3577 		/* Just info, some other driver may have claimed the device. */
3578 		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3579 		return err;
3580 	}
3581 
3582 	/* We control everything through one PF */
3583 	func = PCI_FUNC(pdev->devfn);
3584 	if (func != ent->driver_data) {
3585 		pci_save_state(pdev);        /* to restore SR-IOV later */
3586 		goto sriov;
3587 	}
3588 
3589 	err = pci_enable_device(pdev);
3590 	if (err) {
3591 		dev_err(&pdev->dev, "cannot enable PCI device\n");
3592 		goto out_release_regions;
3593 	}
3594 
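	/*
	 * Prefer 64-bit DMA (and advertise NETIF_F_HIGHDMA if we get it);
	 * otherwise fall back to a 32-bit DMA mask.
	 */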
3595 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3596 		highdma = NETIF_F_HIGHDMA;
3597 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3598 		if (err) {
3599 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3600 				"coherent allocations\n");
3601 			goto out_disable_device;
3602 		}
3603 	} else {
3604 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3605 		if (err) {
3606 			dev_err(&pdev->dev, "no usable DMA configuration\n");
3607 			goto out_disable_device;
3608 		}
3609 	}
3610 
3611 	pci_enable_pcie_error_reporting(pdev);
3612 	enable_pcie_relaxed_ordering(pdev);
3613 	pci_set_master(pdev);
3614 	pci_save_state(pdev);
3615 
3616 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3617 	if (!adapter) {
3618 		err = -ENOMEM;
3619 		goto out_disable_device;
3620 	}
3621 
3622 	adapter->regs = pci_ioremap_bar(pdev, 0);
3623 	if (!adapter->regs) {
3624 		dev_err(&pdev->dev, "cannot map device registers\n");
3625 		err = -ENOMEM;
3626 		goto out_free_adapter;
3627 	}
3628 
3629 	adapter->pdev = pdev;
3630 	adapter->pdev_dev = &pdev->dev;
3631 	adapter->fn = func;
3632 	adapter->msg_enable = dflt_msg_enable;
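	/* 0xff in chan_map means "no port on this Tx channel yet"; entries are filled in as ports register */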
3633 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
3634 
3635 	spin_lock_init(&adapter->stats_lock);
3636 	spin_lock_init(&adapter->tid_release_lock);
3637 
3638 	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
3639 
3640 	err = t4_prep_adapter(adapter);
3641 	if (err)
3642 		goto out_unmap_bar;
3643 	err = adap_init0(adapter);
3644 	if (err)
3645 		goto out_unmap_bar;
3646 
3647 	for_each_port(adapter, i) {
3648 		struct net_device *netdev;
3649 
3650 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
3651 					   MAX_ETH_QSETS);
3652 		if (!netdev) {
3653 			err = -ENOMEM;
3654 			goto out_free_dev;
3655 		}
3656 
3657 		SET_NETDEV_DEV(netdev, &pdev->dev);
3658 
3659 		adapter->port[i] = netdev;
3660 		pi = netdev_priv(netdev);
3661 		pi->adapter = adapter;
3662 		pi->xact_addr_filt = -1;
3663 		pi->rx_offload = RX_CSO;
3664 		pi->port_id = i;
3665 		netdev->irq = pdev->irq;
3666 
3667 		netdev->features |= NETIF_F_SG | TSO_FLAGS;
3668 		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3669 		netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
3670 		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3671 		netdev->vlan_features = netdev->features & VLAN_FEAT;
3672 
3673 		netdev->netdev_ops = &cxgb4_netdev_ops;
3674 		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3675 	}
3676 
3677 	pci_set_drvdata(pdev, adapter);
3678 
3679 	if (adapter->flags & FW_OK) {
3680 		err = t4_port_init(adapter, func, func, 0);
3681 		if (err)
3682 			goto out_free_dev;
3683 	}
3684 
3685 	/*
3686 	 * Configure queues and allocate tables now, they can be needed as
3687 	 * soon as the first register_netdev completes.
3688 	 */
3689 	cfg_queues(adapter);
3690 
3691 	adapter->l2t = t4_init_l2t();
3692 	if (!adapter->l2t) {
3693 		/* We tolerate a lack of L2T, giving up some functionality */
3694 		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
3695 		adapter->params.offload = 0;
3696 	}
3697 
3698 	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
3699 		dev_warn(&pdev->dev, "could not allocate TID table, "
3700 			 "continuing\n");
3701 		adapter->params.offload = 0;
3702 	}
3703 
3704 	/* See what interrupts we'll be using: MSI-X if msi > 1, else MSI if msi > 0, else INTx */
3705 	if (msi > 1 && enable_msix(adapter) == 0)
3706 		adapter->flags |= USING_MSIX;
3707 	else if (msi > 0 && pci_enable_msi(pdev) == 0)
3708 		adapter->flags |= USING_MSI;
3709 
3710 	err = init_rss(adapter);
3711 	if (err)
3712 		goto out_free_dev;
3713 
3714 	/*
3715 	 * The card is now ready to go.  If any errors occur during device
3716 	 * registration we do not fail the whole card but rather proceed only
3717 	 * with the ports we manage to register successfully.  However we must
3718 	 * register at least one net device.
3719 	 */
3720 	for_each_port(adapter, i) {
3721 		pi = adap2pinfo(adapter, i);
3722 		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
3723 		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
3724 
3725 		err = register_netdev(adapter->port[i]);
3726 		if (err)
3727 			break;
3728 		adapter->chan_map[pi->tx_chan] = i;
3729 		print_port_info(adapter->port[i]);
3730 	}
3731 	if (i == 0) {
3732 		dev_err(&pdev->dev, "could not register any net devices\n");
3733 		goto out_free_dev;
3734 	}
3735 	if (err) {
3736 		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
3737 		err = 0;
3738 	}
3739 
3740 	if (cxgb4_debugfs_root) {
3741 		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
3742 							   cxgb4_debugfs_root);
3743 		setup_debugfs(adapter);
3744 	}
3745 
3746 	if (is_offload(adapter))
3747 		attach_ulds(adapter);
3748 
3749 sriov:
3750 #ifdef CONFIG_PCI_IOV
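	/* Provision any SR-IOV Virtual Functions requested for this PCI function via num_vf[] */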
3751 	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
3752 		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
3753 			dev_info(&pdev->dev,
3754 				 "instantiated %u virtual functions\n",
3755 				 num_vf[func]);
3756 #endif
3757 	return 0;
3758 
3759  out_free_dev:
3760 	free_some_resources(adapter);
3761  out_unmap_bar:
3762 	iounmap(adapter->regs);
3763  out_free_adapter:
3764 	kfree(adapter);
3765  out_disable_device:
3766 	pci_disable_pcie_error_reporting(pdev);
3767 	pci_disable_device(pdev);
3768  out_release_regions:
3769 	pci_release_regions(pdev);
3770 	pci_set_drvdata(pdev, NULL);
3771 	return err;
3772 }
3773 
3774 static void __devexit remove_one(struct pci_dev *pdev)
3775 {
3776 	struct adapter *adapter = pci_get_drvdata(pdev);
3777 
3778 	pci_disable_sriov(pdev);
3779 
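	/* adapter is NULL for PCI functions that only claimed their regions (the SR-IOV-only path in init_one) */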
3780 	if (adapter) {
3781 		int i;
3782 
3783 		if (is_offload(adapter))
3784 			detach_ulds(adapter);
3785 
3786 		for_each_port(adapter, i)
3787 			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
3788 				unregister_netdev(adapter->port[i]);
3789 
3790 		if (adapter->debugfs_root)
3791 			debugfs_remove_recursive(adapter->debugfs_root);
3792 
3793 		if (adapter->flags & FULL_INIT_DONE)
3794 			cxgb_down(adapter);
3795 
3796 		free_some_resources(adapter);
3797 		iounmap(adapter->regs);
3798 		kfree(adapter);
3799 		pci_disable_pcie_error_reporting(pdev);
3800 		pci_disable_device(pdev);
3801 		pci_release_regions(pdev);
3802 		pci_set_drvdata(pdev, NULL);
3803 	} else
3804 		pci_release_regions(pdev);
3805 }
3806 
3807 static struct pci_driver cxgb4_driver = {
3808 	.name     = KBUILD_MODNAME,
3809 	.id_table = cxgb4_pci_tbl,
3810 	.probe    = init_one,
3811 	.remove   = __devexit_p(remove_one),
3812 	.err_handler = &cxgb4_eeh,
3813 };
3814 
3815 static int __init cxgb4_init_module(void)
3816 {
3817 	int ret;
3818 
3819 	/* Debugfs support is optional, just warn if this fails */
3820 	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3821 	if (!cxgb4_debugfs_root)
3822 		pr_warning("could not create debugfs entry, continuing\n");
3823 
3824 	ret = pci_register_driver(&cxgb4_driver);
3825 	if (ret < 0)
3826 		debugfs_remove(cxgb4_debugfs_root);
3827 	return ret;
3828 }
3829 
3830 static void __exit cxgb4_cleanup_module(void)
3831 {
3832 	pci_unregister_driver(&cxgb4_driver);
3833 	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
3834 }
3835 
3836 module_init(cxgb4_init_module);
3837 module_exit(cxgb4_cleanup_module);
3838