/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/if_arp.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "c2.h"
#include "c2_provider.h"
#include "c2_user.h"

static int c2_query_device(struct ib_device *ibdev,
			   struct ib_device_attr *props)
{
	struct c2_dev *c2dev = to_c2dev(ibdev);

	pr_debug("%s:%u\n", __func__, __LINE__);

	*props = c2dev->props;
	return 0;
}

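/*
 * Report static port attributes.  As an iWARP RNIC, the IB-specific
 * fields (LID, SM LID, SL) are zeroed and the single port is always
 * reported as active.
 */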
static int c2_query_port(struct ib_device *ibdev,
			 u8 port, struct ib_port_attr *props)
{
	pr_debug("%s:%u\n", __func__, __LINE__);

	props->max_mtu = IB_MTU_4096;
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = IB_PORT_ACTIVE;
	props->phys_state = 0;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->qkey_viol_cntr = 0;
	props->active_width = 1;
	props->active_speed = 1;

	return 0;
}

static int c2_modify_port(struct ib_device *ibdev,
			  u8 port, int port_modify_mask,
			  struct ib_port_modify *props)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return 0;
}

static int c2_query_pkey(struct ib_device *ibdev,
			 u8 port, u16 index, u16 *pkey)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	*pkey = 0;
	return 0;
}

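/* The single GID is fabricated from the pseudo netdev's MAC address. */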
static int c2_query_gid(struct ib_device *ibdev, u8 port,
			int index, union ib_gid *gid)
{
	struct c2_dev *c2dev = to_c2dev(ibdev);

	pr_debug("%s:%u\n", __func__, __LINE__);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);

	return 0;
}

/* Allocate the user context data structure. This keeps track
 * of all objects associated with a particular user-mode client.
 */
static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
					     struct ib_udata *udata)
{
	struct c2_ucontext *context;

	pr_debug("%s:%u\n", __func__, __LINE__);
	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	return &context->ibucontext;
}

static int c2_dealloc_ucontext(struct ib_ucontext *context)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	kfree(context);
	return 0;
}

static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return -ENOSYS;
}

static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
				 struct ib_ucontext *context,
				 struct ib_udata *udata)
{
	struct c2_pd *pd;
	int err;

	pr_debug("%s:%u\n", __func__, __LINE__);

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = c2_pd_alloc(to_c2dev(ibdev), !context, pd);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

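	/*
	 * For a userspace PD, pass the new PD id back through udata so
	 * the user-mode library can name it in later commands.
	 */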
	if (context) {
		if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) {
			c2_pd_free(to_c2dev(ibdev), pd);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}

static int c2_dealloc_pd(struct ib_pd *pd)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
	kfree(pd);

	return 0;
}

static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return ERR_PTR(-ENOSYS);
}

static int c2_ah_destroy(struct ib_ah *ah)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return -ENOSYS;
}

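/*
 * QP reference counting used by the iWARP CM.  The final c2_rem_ref()
 * wakes any waiter on qp->wait so QP teardown can proceed.
 */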
static void c2_add_ref(struct ib_qp *ibqp)
{
	struct c2_qp *qp;
	BUG_ON(!ibqp);
	qp = to_c2qp(ibqp);
	atomic_inc(&qp->refcount);
}

static void c2_rem_ref(struct ib_qp *ibqp)
{
	struct c2_qp *qp;
	BUG_ON(!ibqp);
	qp = to_c2qp(ibqp);
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
{
	struct c2_dev *c2dev = to_c2dev(device);
	struct c2_qp *qp;

	qp = c2_find_qpn(c2dev, qpn);
	pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
		__func__, qp, qpn, device,
		(qp ? atomic_read(&qp->refcount) : 0));

	return (qp ? &qp->ibqp : NULL);
}

static struct ib_qp *c2_create_qp(struct ib_pd *pd,
				  struct ib_qp_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct c2_qp *qp;
	int err;

	pr_debug("%s:%u\n", __func__, __LINE__);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

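	/* Only RC QPs are supported by this adapter. */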
	switch (init_attr->qp_type) {
	case IB_QPT_RC:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp) {
			pr_debug("%s: Unable to allocate QP\n", __func__);
			return ERR_PTR(-ENOMEM);
		}
		spin_lock_init(&qp->lock);
		if (pd->uobject) {
			/* userspace specific */
		}

		err = c2_alloc_qp(to_c2dev(pd->device),
				  to_c2pd(pd), init_attr, qp);

		if (err && pd->uobject) {
			/* userspace specific */
		}

		break;
	default:
		pr_debug("%s: Invalid QP type: %d\n", __func__,
			init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	return &qp->ibqp;
}

static int c2_destroy_qp(struct ib_qp *ib_qp)
{
	struct c2_qp *qp = to_c2qp(ib_qp);

	pr_debug("%s:%u qp=%p,qp->state=%d\n",
		__func__, __LINE__, ib_qp, qp->state);
	c2_free_qp(to_c2dev(ib_qp->device), qp);
	kfree(qp);
	return 0;
}

static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vector,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct c2_cq *cq;
	int err;

	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		pr_debug("%s: Unable to allocate CQ\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
	if (err) {
		pr_debug("%s: error initializing CQ\n", __func__);
		kfree(cq);
		return ERR_PTR(err);
	}

	return &cq->ibcq;
}

static int c2_destroy_cq(struct ib_cq *ib_cq)
{
	struct c2_cq *cq = to_c2cq(ib_cq);

	pr_debug("%s:%u\n", __func__, __LINE__);

	c2_free_cq(to_c2dev(ib_cq->device), cq);
	kfree(cq);

	return 0;
}

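/*
 * Translate IB access flags into the adapter's ACF bits.  Local read
 * and window-bind rights are always granted.
 */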
static inline u32 c2_convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) |
	    (acc & IB_ACCESS_REMOTE_READ ? C2_ACF_REMOTE_READ : 0) |
	    (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) |
	    C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;
}

static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf, int acc, u64 *iova_start)
{
	struct c2_mr *mr;
	u64 *page_list;
	u32 total_len;
	int err, i, j, k, page_shift, pbl_depth;

	pbl_depth = 0;
	total_len = 0;

	page_shift = PAGE_SHIFT;
	/*
	 * If there is only one buffer, assume this could be a mapping of
	 * all physical memory and use an 8x larger page size (32KB with
	 * 4KB pages).
	 */
	if (num_phys_buf == 1)
		page_shift += 3;

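	/*
	 * Validate that each buffer is page aligned and non-empty, and
	 * count the page-list (PBL) entries the registration will need.
	 */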
	for (i = 0; i < num_phys_buf; i++) {

		if (buffer_list[i].addr & ~PAGE_MASK) {
			pr_debug("Unaligned Memory Buffer: 0x%x\n",
				(unsigned int) buffer_list[i].addr);
			return ERR_PTR(-EINVAL);
		}

		if (!buffer_list[i].size) {
			pr_debug("Invalid Buffer Size\n");
			return ERR_PTR(-EINVAL);
		}

		total_len += buffer_list[i].size;
		pbl_depth += ALIGN(buffer_list[i].size,
				   (1 << page_shift)) >> page_shift;
	}

	page_list = vmalloc(sizeof(u64) * pbl_depth);
	if (!page_list) {
		pr_debug("couldn't vmalloc page_list of size %zd\n",
			(sizeof(u64) * pbl_depth));
		return ERR_PTR(-ENOMEM);
	}

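	/* Flatten the buffer list into a dense array of page addresses. */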
	for (i = 0, j = 0; i < num_phys_buf; i++) {

		int naddrs;

		naddrs = ALIGN(buffer_list[i].size,
			       (1 << page_shift)) >> page_shift;
		for (k = 0; k < naddrs; k++)
			page_list[j++] = (buffer_list[i].addr +
						     (k << page_shift));
	}

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		vfree(page_list);
		return ERR_PTR(-ENOMEM);
	}

	mr->pd = to_c2pd(ib_pd);
	mr->umem = NULL;
	pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
		"*iova_start %llx, first pa %llx, last pa %llx\n",
		__func__, page_shift, pbl_depth, total_len,
		(unsigned long long) *iova_start,
		(unsigned long long) page_list[0],
		(unsigned long long) page_list[pbl_depth-1]);
	err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list,
					 (1 << page_shift), pbl_depth,
					 total_len, 0, iova_start,
					 c2_convert_access(acc), mr);
	vfree(page_list);
	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	return &mr->ibmr;
}

static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva = 0;

	pr_debug("%s:%u\n", __func__, __LINE__);

	/* AMSO1100 limit */
	bl.size = 0xffffffff;
	bl.addr = 0;
	return c2_reg_phys_mr(pd, &bl, 1, acc, &kva);
}

static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				    u64 virt, int acc, struct ib_udata *udata)
{
	u64 *pages;
	u64 kva = 0;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct c2_pd *c2pd = to_c2pd(pd);
	struct c2_mr *c2mr;

	pr_debug("%s:%u\n", __func__, __LINE__);

	c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
	if (!c2mr)
		return ERR_PTR(-ENOMEM);
	c2mr->pd = c2pd;

	c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(c2mr->umem)) {
		err = PTR_ERR(c2mr->umem);
		kfree(c2mr);
		return ERR_PTR(err);
	}

	shift = ffs(c2mr->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &c2mr->umem->chunk_list, list)
		n += chunk->nents;

	pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err;
	}

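	/*
	 * Walk the umem chunk list and record the DMA address of every
	 * page in a flat array for the adapter.
	 */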
	i = 0;
	list_for_each_entry(chunk, &c2mr->umem->chunk_list, list) {
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] =
					sg_dma_address(&chunk->page_list[j]) +
					(c2mr->umem->page_size * k);
			}
		}
	}

	kva = virt;
	err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
					 pages,
					 c2mr->umem->page_size,
					 i,
					 length,
					 c2mr->umem->offset,
					 &kva,
					 c2_convert_access(acc),
					 c2mr);
	kfree(pages);
	if (err)
		goto err;
	return &c2mr->ibmr;

err:
	ib_umem_release(c2mr->umem);
	kfree(c2mr);
	return ERR_PTR(err);
}

static int c2_dereg_mr(struct ib_mr *ib_mr)
{
	struct c2_mr *mr = to_c2mr(ib_mr);
	int err;

	pr_debug("%s:%u\n", __func__, __LINE__);

	err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
	if (err)
		pr_debug("c2_stag_dealloc failed: %d\n", err);
	else {
		if (mr->umem)
			ib_umem_release(mr->umem);
		kfree(mr);
	}

	return err;
}

static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
	pr_debug("%s:%u\n", __func__, __LINE__);
	return sprintf(buf, "%x\n", c2dev->props.hw_ver);
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
	pr_debug("%s:%u\n", __func__, __LINE__);
	return sprintf(buf, "%x.%x.%x\n",
		       (int) (c2dev->props.fw_ver >> 32),
		       (int) (c2dev->props.fw_ver >> 16) & 0xffff,
		       (int) (c2dev->props.fw_ver & 0xffff));
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return sprintf(buf, "AMSO1100\n");
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *c2_dev_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
	int err;

	err =
	    c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr,
			 attr_mask);

	return err;
}

static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return -ENOSYS;
}

static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return -ENOSYS;
}

static int c2_process_mad(struct ib_device *ibdev,
			  int mad_flags,
			  u8 port_num,
			  struct ib_wc *in_wc,
			  struct ib_grh *in_grh,
			  struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return -ENOSYS;
}

static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	pr_debug("%s:%u\n", __func__, __LINE__);

	/* Request a connection */
	return c2_llp_connect(cm_id, iw_param);
}

static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	pr_debug("%s:%u\n", __func__, __LINE__);

	/* Accept the new connection */
	return c2_llp_accept(cm_id, iw_param);
}

static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;

	pr_debug("%s:%u\n", __func__, __LINE__);

	err = c2_llp_reject(cm_id, pdata, pdata_len);
	return err;
}

static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
{
	int err;

	pr_debug("%s:%u\n", __func__, __LINE__);
	err = c2_llp_service_create(cm_id, backlog);
	pr_debug("%s:%u err=%d\n",
		__func__, __LINE__,
		err);
	return err;
}

static int c2_service_destroy(struct iw_cm_id *cm_id)
{
	int err;
	pr_debug("%s:%u\n", __func__, __LINE__);

	err = c2_llp_service_destroy(cm_id);

	return err;
}

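/*
 * Bringing the pseudo interface up (or down) walks its IPv4 address
 * list and adds (or deletes) each address on the RNIC.
 */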
static int c2_pseudo_up(struct net_device *netdev)
{
	struct in_device *ind;
	struct c2_dev *c2dev = netdev->ml_priv;

	ind = in_dev_get(netdev);
	if (!ind)
		return 0;

	pr_debug("adding...\n");
	for_ifa(ind) {
#ifdef DEBUG
		u8 *ip = (u8 *) &ifa->ifa_address;

		pr_debug("%s: %d.%d.%d.%d\n",
		       ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
#endif
		c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
	}
	endfor_ifa(ind);
	in_dev_put(ind);

	return 0;
}

static int c2_pseudo_down(struct net_device *netdev)
{
	struct in_device *ind;
	struct c2_dev *c2dev = netdev->ml_priv;

	ind = in_dev_get(netdev);
	if (!ind)
		return 0;

	pr_debug("deleting...\n");
	for_ifa(ind) {
#ifdef DEBUG
		u8 *ip = (u8 *) &ifa->ifa_address;

		pr_debug("%s: %d.%d.%d.%d\n",
		       ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
#endif
		c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
	}
	endfor_ifa(ind);
	in_dev_put(ind);

	return 0;
}

static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu)
{
	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/* TODO: Tell the RNIC about the new RDMA interface MTU */
	return 0;
}

static const struct net_device_ops c2_pseudo_netdev_ops = {
	.ndo_open		= c2_pseudo_up,
	.ndo_stop		= c2_pseudo_down,
	.ndo_start_xmit		= c2_pseudo_xmit_frame,
	.ndo_change_mtu		= c2_pseudo_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

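/*
 * The pseudo netdev never carries real traffic: transmitted frames are
 * simply dropped and ARP is disabled.  It gives the iWARP interface a
 * normal-looking network device to hold its IP configuration.
 */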
static void setup(struct net_device *netdev)
{
	netdev->netdev_ops = &c2_pseudo_netdev_ops;

	netdev->watchdog_timeo = 0;
	netdev->type = ARPHRD_ETHER;
	netdev->mtu = 1500;
	netdev->hard_header_len = ETH_HLEN;
	netdev->addr_len = ETH_ALEN;
	netdev->tx_queue_len = 0;
	netdev->flags |= IFF_NOARP;
}

static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
{
	char name[IFNAMSIZ];
	struct net_device *netdev;

	/* change ethxxx to iwxxx */
	strcpy(name, "iw");
	strcat(name, &c2dev->netdev->name[3]);
	netdev = alloc_netdev(0, name, setup);
	if (!netdev) {
		printk(KERN_ERR PFX "%s - etherdev alloc failed\n",
			__func__);
		return NULL;
	}

	netdev->ml_priv = c2dev;

	SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);

	memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6);

	/* Print out the MAC address */
	pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X\n",
		netdev->name,
		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);

#if 0
	/* Disable network packets */
	netif_stop_queue(netdev);
#endif
	return netdev;
}

int c2_register_device(struct c2_dev *dev)
{
	int ret = -ENOMEM;
	int i;

	/* Register pseudo network device */
	dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
	if (!dev->pseudo_netdev)
		goto out;

	ret = register_netdev(dev->pseudo_netdev);
	if (ret)
		goto out_free_netdev;

	pr_debug("%s:%u\n", __func__, __LINE__);
	strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
	dev->ibdev.owner = THIS_MODULE;
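	/* Advertise only the uverbs commands this driver implements. */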
	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);

	dev->ibdev.node_type = RDMA_NODE_RNIC;
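	/* The node GUID is the pseudo netdev's 6-byte MAC address, zero padded. */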
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &dev->pcidev->dev;
	dev->ibdev.query_device = c2_query_device;
	dev->ibdev.query_port = c2_query_port;
	dev->ibdev.modify_port = c2_modify_port;
	dev->ibdev.query_pkey = c2_query_pkey;
	dev->ibdev.query_gid = c2_query_gid;
	dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = c2_dealloc_ucontext;
	dev->ibdev.mmap = c2_mmap_uar;
	dev->ibdev.alloc_pd = c2_alloc_pd;
	dev->ibdev.dealloc_pd = c2_dealloc_pd;
	dev->ibdev.create_ah = c2_ah_create;
	dev->ibdev.destroy_ah = c2_ah_destroy;
	dev->ibdev.create_qp = c2_create_qp;
	dev->ibdev.modify_qp = c2_modify_qp;
	dev->ibdev.destroy_qp = c2_destroy_qp;
	dev->ibdev.create_cq = c2_create_cq;
	dev->ibdev.destroy_cq = c2_destroy_cq;
	dev->ibdev.poll_cq = c2_poll_cq;
	dev->ibdev.get_dma_mr = c2_get_dma_mr;
	dev->ibdev.reg_phys_mr = c2_reg_phys_mr;
	dev->ibdev.reg_user_mr = c2_reg_user_mr;
	dev->ibdev.dereg_mr = c2_dereg_mr;

	dev->ibdev.alloc_fmr = NULL;
	dev->ibdev.unmap_fmr = NULL;
	dev->ibdev.dealloc_fmr = NULL;
	dev->ibdev.map_phys_fmr = NULL;

	dev->ibdev.attach_mcast = c2_multicast_attach;
	dev->ibdev.detach_mcast = c2_multicast_detach;
	dev->ibdev.process_mad = c2_process_mad;

	dev->ibdev.req_notify_cq = c2_arm_cq;
	dev->ibdev.post_send = c2_post_send;
	dev->ibdev.post_recv = c2_post_receive;

	dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
	if (dev->ibdev.iwcm == NULL) {
		ret = -ENOMEM;
		goto out_unregister_netdev;
	}
	dev->ibdev.iwcm->add_ref = c2_add_ref;
	dev->ibdev.iwcm->rem_ref = c2_rem_ref;
	dev->ibdev.iwcm->get_qp = c2_get_qp;
	dev->ibdev.iwcm->connect = c2_connect;
	dev->ibdev.iwcm->accept = c2_accept;
	dev->ibdev.iwcm->reject = c2_reject;
	dev->ibdev.iwcm->create_listen = c2_service_create;
	dev->ibdev.iwcm->destroy_listen = c2_service_destroy;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto out_free_iwcm;

	for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 c2_dev_attributes[i]);
		if (ret)
			goto out_unregister_ibdev;
	}
	goto out;

out_unregister_ibdev:
	ib_unregister_device(&dev->ibdev);
out_free_iwcm:
	kfree(dev->ibdev.iwcm);
out_unregister_netdev:
	unregister_netdev(dev->pseudo_netdev);
out_free_netdev:
	free_netdev(dev->pseudo_netdev);
out:
	pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
	return ret;
}

void c2_unregister_device(struct c2_dev *dev)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	unregister_netdev(dev->pseudo_netdev);
	free_netdev(dev->pseudo_netdev);
	ib_unregister_device(&dev->ibdev);
}