/*
 * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <asm/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;

static void cxgb_neigh_update(struct neighbour *neigh);
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
static inline int offload_activated(struct t3cdev *tdev)
{
	const struct adapter *adapter = tdev2adap(tdev);

	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}

/**
 *	cxgb3_register_client - register an offload client
 *	@client: the client
 *
 *	Add the client to the client list and call back the client for
 *	each activated offload device.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_add_tail(&client->client_list, &client_list);

	if (client->add) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);

/**
 *	cxgb3_unregister_client - unregister an offload client
 *	@client: the client
 *
 *	Remove the client from the client list and call back the client for
 *	each activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_del(&client->client_list);

	if (client->remove) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);

/**
 *	cxgb3_add_clients - activate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients once an offload device is activated.
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->add)
			client->add(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

/**
 *	cxgb3_remove_clients - deactivate registered clients
 *			       for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients once an offload device is deactivated.
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->remove)
			client->remove(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

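/*
 * Notify every registered client that supplies an event handler of an
 * adapter event, passing along the event code and the affected port.
 */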
void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->event_handler)
			client->event_handler(tdev, event, port);
	}
	mutex_unlock(&cxgb3_db_lock);
}

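/*
 * Map a MAC address (and optional VLAN tag) to the net_device that owns it,
 * following VLAN groups and bonding masters where applicable.
 */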
static struct net_device *get_iff_from_mac(struct adapter *adapter,
					   const unsigned char *mac,
					   unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
		struct vlan_group *grp;
		struct net_device *dev = adapter->port[i];
		const struct port_info *p = netdev_priv(dev);

		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
			if (vlan && vlan != VLAN_VID_MASK) {
				grp = p->vlan_grp;
				dev = NULL;
				if (grp)
					dev = vlan_group_get_device(grp, vlan);
			} else if (netif_is_bond_slave(dev)) {
				while (dev->master)
					dev = dev->master;
			}
			return dev;
		}
	}
	return NULL;
}

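/*
 * Handle iSCSI ULP control requests: report the adapter's iSCSI DDP memory
 * layout and payload size limits, or program the DDP tag mask and page sizes.
 */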
static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
			      void *data)
{
	int i;
	int ret = 0;
	unsigned int val = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->pdev = adapter->pdev;
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);

		val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
		for (i = 0; i < 4; i++, val >>= 8)
			uiip->pgsz_factor[i] = val & 0xFF;

		val = t3_read_reg(adapter, A_TP_PARA_REG7);
		uiip->max_txsz =
		uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
				     (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		val = min(adapter->params.tp.tx_pg_size,
			  t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		uiip->max_txsz = min(val, uiip->max_txsz);

		/* set MaxRxData to 16224 */
		val = t3_read_reg(adapter, A_TP_PARA_REG2);
		if ((val >> S_MAXRXDATA) != 0x3f60) {
			val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
			val |= V_MAXRXDATA(0x3f60);
			printk(KERN_INFO
				"%s, iscsi set MaxRxData to 16224 (0x%x).\n",
				adapter->name, val);
			t3_write_reg(adapter, A_TP_PARA_REG2, val);
		}

		/*
		 * On rx, the iscsi pdu has to be < rx page size and the
		 * max rx data length programmed in TP.
		 */
		val = min(adapter->params.tp.rx_pg_size,
			  ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
				S_MAXRXDATA) & M_MAXRXDATA);
		uiip->max_rxsz = min(val, uiip->max_rxsz);
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		/* program the ddp page sizes */
		for (i = 0; i < 4; i++)
			val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
		if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
			printk(KERN_INFO
				"%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n",
				adapter->name, val, uiip->pgsz_factor[0],
				uiip->pgsz_factor[1], uiip->pgsz_factor[2],
				uiip->pgsz_factor[3]);
			t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

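/*
 * Handle RDMA control requests: report memory map parameters, operate on CQ
 * contexts, read back adapter memory, and set up the RDMA control QP.
 */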
static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *rdma = data;
		struct pci_dev *pdev = adapter->pdev;

		rdma->udbell_physbase = pci_resource_start(pdev, 2);
		rdma->udbell_len = pci_resource_len(pdev, 2);
		rdma->tpt_base =
			t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		rdma->pbl_base =
			t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
		rdma->pdev = pdev;
		break;
	}
	case RDMA_CQ_OP:{
		unsigned long flags;
		struct rdma_cq_op *rdma = data;

		/* may be called in any context */
		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
		ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
					rdma->credits);
		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
		break;
	}
	case RDMA_GET_MEM:{
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return -EINVAL;
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		ret =
			t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
					(u64 *) t->buf);
		if (ret)
			return ret;
		break;
	}
	case RDMA_CQ_SETUP:{
		struct rdma_cq_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret =
			t3_sge_init_cqcntxt(adapter, rdma->id,
					rdma->base_addr, rdma->size,
					ASYNC_NOTIF_RSPQ,
					rdma->ovfl_mode, rdma->credits,
					rdma->credit_thres);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP:{
		struct rdma_ctrlqp_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
						SGE_CNTXT_RDMA,
						ASYNC_NOTIF_RSPQ,
						rdma->base_addr, rdma->size,
						FW_RI_TID_START, 1, 0);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_GET_MIB: {
		spin_lock(&adapter->stats_lock);
		t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	struct ofld_page_info *rx_page_info;
	struct tp_params *tp = &adapter->params.tp;
	int i;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;	/* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
						iffmacp->vlan_tag &
						VLAN_VID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, i)
			ports->lldevs[i] = adapter->port[i];
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
	case RDMA_GET_MIB:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_rdma_ctl(adapter, req, data);
	case GET_RX_PAGE_INFO:
		rx_page_info = data;
		rx_page_info->page_size = tp->rx_pg_size;
		rx_page_info->num = tp->rx_num_pgs;
		break;
	case GET_ISCSI_IPV4ADDR: {
		struct iscsi_ipv4addr *p = data;
		struct port_info *pi = netdev_priv(p->dev);
		p->ipv4addr = pi->iscsi_ipv4addr;
		break;
	}
	case GET_EMBEDDED_INFO: {
		struct ch_embedded_info *e = data;

		spin_lock(&adapter->stats_lock);
		t3_get_fw_version(adapter, &e->fw_vers);
		t3_get_tp_version(adapter, &e->tp_vers);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is set up.  This just drops the packet, as it isn't
 * normal to get offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->t3c_tid.ctx;

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);

	return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);

/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	spin_lock_bh(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);

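/*
 * Record the client and context that own an established TID and bump the
 * count of TIDs in use.
 */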
void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
		      void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);

/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
	struct cpl_tid_release *req;

	skb->priority = CPL_PRIORITY_SETUP;
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

static void t3_process_tid_release_list(struct work_struct *work)
{
	struct t3c_data *td = container_of(work, struct t3c_data,
					   tid_release_task);
	struct sk_buff *skb;
	struct t3cdev *tdev = td->dev;

	spin_lock_bh(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct t3c_tid_entry *p = td->tid_release_list;

		td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
		spin_unlock_bh(&td->tid_release_lock);

		skb = alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL);
		if (!skb)
			skb = td->nofail_skb;
		if (!skb) {
			spin_lock_bh(&td->tid_release_lock);
			p->ctx = (void *)td->tid_release_list;
			td->tid_release_list = (struct t3c_tid_entry *)p;
			break;
		}
		mk_tid_release(skb, p - td->tid_maps.tid_tab);
		cxgb3_ofld_send(tdev, skb);
		p->ctx = NULL;
		if (skb == td->nofail_skb)
			td->nofail_skb =
				alloc_skb(sizeof(struct cpl_tid_release),
					GFP_KERNEL);
		spin_lock_bh(&td->tid_release_lock);
	}
	td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
	spin_unlock_bh(&td->tid_release_lock);

	if (!td->nofail_skb)
		td->nofail_skb =
			alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL);
}

/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
	struct t3c_data *td = T3C_DATA(tdev);
	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	p->client = NULL;
	td->tid_release_list = p;
	if (!p->ctx || td->release_list_incomplete)
		schedule_work(&td->tid_release_task);
	spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);

/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);

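/*
 * Allocate an active-open TID from the free list and bind it to the given
 * client and context.  Returns the new ATID, or -1 if the pool is exhausted
 * or too few TIDs would remain free.
 */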
int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree &&
	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
	    t->ntids) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);

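/*
 * Allocate a server TID from the free list and bind it to the given client
 * and context.  Returns the new STID, or -1 if the pool is exhausted.
 */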
int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);

/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return (struct t3cdev *)pi->adapter;
}

EXPORT_SYMBOL(dev2t3cdev);

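/*
 * CPL message handlers.  The *_write_rpl handlers below only check the reply
 * status; the remaining handlers look up the TID carried in the message and
 * dispatch to whatever handler the owning client registered for that opcode.
 */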
static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_rte_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
	    t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
								    t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}


static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode] (dev, skb,
							     t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk(KERN_ERR "%s: passive open TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_stid(t, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}


/*
 * Returns an sk_buff for a reply CPL message of size len.  If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated.  The input skb must be of size at least len.  Note that this
 * operation does not destroy the original skb data even if it decides to reuse
 * the buffer.
 */
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
					       gfp_t gfp)
{
	if (likely(!skb_cloned(skb))) {
		BUG_ON(skb->len < len);
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}

static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(skb);
		struct cpl_abort_rpl *rpl;
		struct sk_buff *reply_skb;
		unsigned int tid = GET_TID(req);
		u8 cmd = req->status;

		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
			goto out;

		reply_skb = cxgb3_get_cpl_reply_skb(skb,
						    sizeof(struct
							   cpl_abort_rpl),
						    GFP_ATOMIC);

		if (!reply_skb) {
			printk(KERN_ERR "do_abort_req_rss: couldn't get skb!\n");
			goto out;
		}
		reply_skb->priority = CPL_PRIORITY_DATA;
		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
		rpl = cplhdr(reply_skb);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
		rpl->cmd = cmd;
		cxgb3_ofld_send(dev, reply_skb);
out:
		return CPL_RET_BUF_DONE;
	}
}

static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk(KERN_ERR "%s: active establish TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_atid(t, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = htons(0xffff);
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	netif_receive_skb(skb);
	return 0;
}

/*
 * That skb would better have come from process_responses() where we abuse
 * ->priority and ->csum to carry our data.  NB: if we get to per-arch
 * ->csum, the things might get really interesting here.
 */

static inline u32 get_hwtid(struct sk_buff *skb)
{
	return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
}

static inline u32 get_opcode(struct sk_buff *skb)
{
	return G_OPCODE(ntohl((__force __be32)skb->csum));
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
	unsigned int hwtid = get_hwtid(skb);
	unsigned int opcode = get_opcode(skb);
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode]) {
		return t3c_tid->client->handlers[opcode] (dev, skb,
							  t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

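/*
 * Netevent notifier callback: propagate neighbour updates and route
 * redirects to the offload hardware state.
 */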
static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case (NETEVENT_NEIGH_UPDATE):{
		cxgb_neigh_update((struct neighbour *)ctx);
		break;
	}
	case (NETEVENT_REDIRECT):{
		struct netevent_redirect *nr = ctx;
		cxgb_redirect(nr->old, nr->new);
		cxgb_neigh_update(nr->new->neighbour);
		break;
	}
	default:
		break;
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = nb_callback
};

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
	       *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
 * to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		printk(KERN_ERR "T3C: handler registration for "
		       "opcode %x failed\n", opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);

/*
 * T3CDEV's receive method.
 */
static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
	while (n--) {
		struct sk_buff *skb = *skbs++;
		unsigned int opcode = get_opcode(skb);
		int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(skb);

			printk(KERN_ERR "%s: CPL message (opcode %u) had "
			       "unknown TID %u\n", dev->name, opcode,
			       G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
	return 0;
}

/*
 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);

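/*
 * Return whether a net_device is one of this driver's offload-capable ports.
 */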
static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}

static void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	if (dev && (is_offloading(dev))) {
		struct t3cdev *tdev = dev2t3cdev(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}

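/*
 * Build and send a CPL_SET_TCB_FIELD work request that repoints a
 * connection's TCB at a new L2T entry.
 */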
static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}

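/*
 * Handle a routing redirect: allocate an L2T entry for the new destination
 * and walk the TID table so each affected client can switch its connection
 * over to the new route.
 */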
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
	struct net_device *olddev, *newdev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	olddev = old->neighbour->dev;
	newdev = new->neighbour->dev;
	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		printk(KERN_WARNING "%s: Redirect to non-offload "
		       "device ignored.\n", __func__);
		return;
	}
	tdev = dev2t3cdev(olddev);
	BUG_ON(!tdev);
	if (tdev != dev2t3cdev(newdev)) {
		printk(KERN_WARNING "%s: Redirect to different "
		       "offload device ignored.\n", __func__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new->neighbour, newdev);
	if (!e) {
		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
		       __func__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te && te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				l2t_hold(L2DATA(tdev), e);
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(L2DATA(tdev), e);
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *cxgb_alloc_mem(unsigned long size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void cxgb_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}

static void free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

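/*
 * Bring up the offload infrastructure for an adapter: query the device
 * limits, allocate the L2 table and TID tables, and install the real Rx and
 * neighbour-update handlers.
 */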
int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	L2DATA(dev) = t3_init_l2t(l2t_capacity);
	if (!L2DATA(dev))
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
	t->release_list_incomplete = 0;

	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	L2DATA(dev) = NULL;
out_free:
	kfree(t);
	return err;
}

void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	t3_free_l2t(L2DATA(tdev));
	L2DATA(tdev) = NULL;
	if (t->nofail_skb)
		kfree_skb(t->nofail_skb);
	kfree(t);
}

static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

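/*
 * Map the adapter's hardware revision to the corresponding t3cdev type.
 */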
static inline int adap2type(struct adapter *adapter)
{
	int type = 0;

	switch (adapter->params.rev) {
	case T3_REV_A:
		type = T3A;
		break;
	case T3_REV_B:
	case T3_REV_B2:
		type = T3B;
		break;
	case T3_REV_C:
		type = T3C;
		break;
	}
	return type;
}

void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adap2type(adapter);

	register_tdev(tdev);
}

void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}

void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}