1 /*
2  * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3  * All rights reserved.
4  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <rdma/ib_mad.h>
36 #include <rdma/ib_user_verbs.h>
37 #include <linux/io.h>
38 #include <linux/module.h>
39 #include <linux/utsname.h>
40 #include <linux/rculist.h>
41 #include <linux/mm.h>
42 #include <linux/random.h>
43 
44 #include "qib.h"
45 #include "qib_common.h"
46 
47 static unsigned int ib_qib_qp_table_size = 256;
48 module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
49 MODULE_PARM_DESC(qp_table_size, "QP table size");
50 
51 unsigned int ib_qib_lkey_table_size = 16;
52 module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
53 		   S_IRUGO);
54 MODULE_PARM_DESC(lkey_table_size,
55 		 "LKEY table size in bits (2^n, 1 <= n <= 23)");
56 
57 static unsigned int ib_qib_max_pds = 0xFFFF;
58 module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
59 MODULE_PARM_DESC(max_pds,
60 		 "Maximum number of protection domains to support");
61 
62 static unsigned int ib_qib_max_ahs = 0xFFFF;
63 module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
64 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
65 
66 unsigned int ib_qib_max_cqes = 0x2FFFF;
67 module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
68 MODULE_PARM_DESC(max_cqes,
69 		 "Maximum number of completion queue entries to support");
70 
71 unsigned int ib_qib_max_cqs = 0x1FFFF;
72 module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
73 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
74 
75 unsigned int ib_qib_max_qp_wrs = 0x3FFF;
76 module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
77 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
78 
79 unsigned int ib_qib_max_qps = 16384;
80 module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
81 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
82 
83 unsigned int ib_qib_max_sges = 0x60;
84 module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
85 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
86 
87 unsigned int ib_qib_max_mcast_grps = 16384;
88 module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
89 MODULE_PARM_DESC(max_mcast_grps,
90 		 "Maximum number of multicast groups to support");
91 
92 unsigned int ib_qib_max_mcast_qp_attached = 16;
93 module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
94 		   uint, S_IRUGO);
95 MODULE_PARM_DESC(max_mcast_qp_attached,
96 		 "Maximum number of attached QPs to support");
97 
98 unsigned int ib_qib_max_srqs = 1024;
99 module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
100 MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
101 
102 unsigned int ib_qib_max_srq_sges = 128;
103 module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
104 MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
105 
106 unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
107 module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
108 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
109 
110 static unsigned int ib_qib_disable_sma;
111 module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
112 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
113 
114 /*
115  * Note that it is OK to post send work requests in the SQE and ERR
116  * states; qib_do_send() will process them and generate error
117  * completions as per IB 1.2 C10-96.
118  */
119 const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
120 	[IB_QPS_RESET] = 0,
121 	[IB_QPS_INIT] = QIB_POST_RECV_OK,
122 	[IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
123 	[IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
124 	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
125 	    QIB_PROCESS_NEXT_SEND_OK,
126 	[IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
127 	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
128 	[IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
129 	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
130 	[IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
131 	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
132 };
133 
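/*
 * Per-process context state; currently this only wraps the core
 * ib_ucontext with no driver-private fields.
 */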
134 struct qib_ucontext {
135 	struct ib_ucontext ibucontext;
136 };
137 
138 static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
139 						  *ibucontext)
140 {
141 	return container_of(ibucontext, struct qib_ucontext, ibucontext);
142 }
143 
144 /*
145  * Translate ib_wr_opcode into ib_wc_opcode.
146  */
147 const enum ib_wc_opcode ib_qib_wc_opcode[] = {
148 	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
149 	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
150 	[IB_WR_SEND] = IB_WC_SEND,
151 	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
152 	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
153 	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
154 	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
155 };
156 
157 /*
158  * System image GUID.
159  */
160 __be64 ib_qib_sys_image_guid;
161 
162 /**
163  * qib_copy_sge - copy data to SGE memory
164  * @ss: the SGE state
165  * @data: the data to copy
166  * @length: the length of the data
 * @release: drop the MR reference once an SGE has been fully consumed
167  */
168 void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
169 {
170 	struct qib_sge *sge = &ss->sge;
171 
172 	while (length) {
173 		u32 len = sge->length;
174 
175 		if (len > length)
176 			len = length;
177 		if (len > sge->sge_length)
178 			len = sge->sge_length;
179 		BUG_ON(len == 0);
180 		memcpy(sge->vaddr, data, len);
181 		sge->vaddr += len;
182 		sge->length -= len;
183 		sge->sge_length -= len;
184 		if (sge->sge_length == 0) {
185 			if (release)
186 				atomic_dec(&sge->mr->refcount);
187 			if (--ss->num_sge)
188 				*sge = *ss->sg_list++;
189 		} else if (sge->length == 0 && sge->mr->lkey) {
190 			if (++sge->n >= QIB_SEGSZ) {
191 				if (++sge->m >= sge->mr->mapsz)
192 					break;
193 				sge->n = 0;
194 			}
195 			sge->vaddr =
196 				sge->mr->map[sge->m]->segs[sge->n].vaddr;
197 			sge->length =
198 				sge->mr->map[sge->m]->segs[sge->n].length;
199 		}
200 		data += len;
201 		length -= len;
202 	}
203 }
204 
205 /**
206  * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
207  * @ss: the SGE state
208  * @length: the number of bytes to skip
 * @release: drop the MR reference once an SGE has been fully consumed
209  */
210 void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
211 {
212 	struct qib_sge *sge = &ss->sge;
213 
214 	while (length) {
215 		u32 len = sge->length;
216 
217 		if (len > length)
218 			len = length;
219 		if (len > sge->sge_length)
220 			len = sge->sge_length;
221 		BUG_ON(len == 0);
222 		sge->vaddr += len;
223 		sge->length -= len;
224 		sge->sge_length -= len;
225 		if (sge->sge_length == 0) {
226 			if (release)
227 				atomic_dec(&sge->mr->refcount);
228 			if (--ss->num_sge)
229 				*sge = *ss->sg_list++;
230 		} else if (sge->length == 0 && sge->mr->lkey) {
231 			if (++sge->n >= QIB_SEGSZ) {
232 				if (++sge->m >= sge->mr->mapsz)
233 					break;
234 				sge->n = 0;
235 			}
236 			sge->vaddr =
237 				sge->mr->map[sge->m]->segs[sge->n].vaddr;
238 			sge->length =
239 				sge->mr->map[sge->m]->segs[sge->n].length;
240 		}
241 		length -= len;
242 	}
243 }
244 
245 /*
246  * Count the number of DMA descriptors needed to send length bytes of data.
247  * Don't modify the qib_sge_state to get the count.
248  * Return zero if any of the segments is not aligned.
249  */
250 static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
251 {
252 	struct qib_sge *sg_list = ss->sg_list;
253 	struct qib_sge sge = ss->sge;
254 	u8 num_sge = ss->num_sge;
255 	u32 ndesc = 1;  /* count the header */
256 
257 	while (length) {
258 		u32 len = sge.length;
259 
260 		if (len > length)
261 			len = length;
262 		if (len > sge.sge_length)
263 			len = sge.sge_length;
264 		BUG_ON(len == 0);
265 		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
266 		    (len != length && (len & (sizeof(u32) - 1)))) {
267 			ndesc = 0;
268 			break;
269 		}
270 		ndesc++;
271 		sge.vaddr += len;
272 		sge.length -= len;
273 		sge.sge_length -= len;
274 		if (sge.sge_length == 0) {
275 			if (--num_sge)
276 				sge = *sg_list++;
277 		} else if (sge.length == 0 && sge.mr->lkey) {
278 			if (++sge.n >= QIB_SEGSZ) {
279 				if (++sge.m >= sge.mr->mapsz)
280 					break;
281 				sge.n = 0;
282 			}
283 			sge.vaddr =
284 				sge.mr->map[sge.m]->segs[sge.n].vaddr;
285 			sge.length =
286 				sge.mr->map[sge.m]->segs[sge.n].length;
287 		}
288 		length -= len;
289 	}
290 	return ndesc;
291 }
292 
293 /*
294  * Copy from the SGEs to the data buffer.
295  */
296 static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
297 {
298 	struct qib_sge *sge = &ss->sge;
299 
300 	while (length) {
301 		u32 len = sge->length;
302 
303 		if (len > length)
304 			len = length;
305 		if (len > sge->sge_length)
306 			len = sge->sge_length;
307 		BUG_ON(len == 0);
308 		memcpy(data, sge->vaddr, len);
309 		sge->vaddr += len;
310 		sge->length -= len;
311 		sge->sge_length -= len;
312 		if (sge->sge_length == 0) {
313 			if (--ss->num_sge)
314 				*sge = *ss->sg_list++;
315 		} else if (sge->length == 0 && sge->mr->lkey) {
316 			if (++sge->n >= QIB_SEGSZ) {
317 				if (++sge->m >= sge->mr->mapsz)
318 					break;
319 				sge->n = 0;
320 			}
321 			sge->vaddr =
322 				sge->mr->map[sge->m]->segs[sge->n].vaddr;
323 			sge->length =
324 				sge->mr->map[sge->m]->segs[sge->n].length;
325 		}
326 		data += len;
327 		length -= len;
328 	}
329 }
330 
331 /**
332  * qib_post_one_send - post one RC, UC, or UD send work request
333  * @qp: the QP to post on
334  * @wr: the work request to send
335  */
336 static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
337 {
338 	struct qib_swqe *wqe;
339 	u32 next;
340 	int i;
341 	int j;
342 	int acc;
343 	int ret;
344 	unsigned long flags;
345 	struct qib_lkey_table *rkt;
346 	struct qib_pd *pd;
347 
348 	spin_lock_irqsave(&qp->s_lock, flags);
349 
350 	/* Check that state is OK to post send. */
351 	if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
352 		goto bail_inval;
353 
354 	/* IB spec says that num_sge == 0 is OK. */
355 	if (wr->num_sge > qp->s_max_sge)
356 		goto bail_inval;
357 
358 	/*
359 	 * Don't allow RDMA reads or atomic operations on UC QPs, and
360 	 * reject undefined opcodes.
361 	 * Make sure the buffer is large enough to hold the result for atomics.
362 	 */
363 	if (wr->opcode == IB_WR_FAST_REG_MR) {
364 		if (qib_fast_reg_mr(qp, wr))
365 			goto bail_inval;
366 	} else if (qp->ibqp.qp_type == IB_QPT_UC) {
367 		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
368 			goto bail_inval;
369 	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
370 		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
371 		if (wr->opcode != IB_WR_SEND &&
372 		    wr->opcode != IB_WR_SEND_WITH_IMM)
373 			goto bail_inval;
374 		/* Check UD destination address PD */
375 		if (qp->ibqp.pd != wr->wr.ud.ah->pd)
376 			goto bail_inval;
377 	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
378 		goto bail_inval;
379 	else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
380 		   (wr->num_sge == 0 ||
381 		    wr->sg_list[0].length < sizeof(u64) ||
382 		    wr->sg_list[0].addr & (sizeof(u64) - 1)))
383 		goto bail_inval;
384 	else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
385 		goto bail_inval;
386 
387 	next = qp->s_head + 1;
388 	if (next >= qp->s_size)
389 		next = 0;
390 	if (next == qp->s_last) {
391 		ret = -ENOMEM;
392 		goto bail;
393 	}
394 
395 	rkt = &to_idev(qp->ibqp.device)->lk_table;
396 	pd = to_ipd(qp->ibqp.pd);
397 	wqe = get_swqe_ptr(qp, qp->s_head);
398 	wqe->wr = *wr;
399 	wqe->length = 0;
400 	j = 0;
401 	if (wr->num_sge) {
402 		acc = wr->opcode >= IB_WR_RDMA_READ ?
403 			IB_ACCESS_LOCAL_WRITE : 0;
404 		for (i = 0; i < wr->num_sge; i++) {
405 			u32 length = wr->sg_list[i].length;
406 			int ok;
407 
408 			if (length == 0)
409 				continue;
410 			ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
411 					 &wr->sg_list[i], acc);
412 			if (!ok)
413 				goto bail_inval_free;
414 			wqe->length += length;
415 			j++;
416 		}
417 		wqe->wr.num_sge = j;
418 	}
419 	if (qp->ibqp.qp_type == IB_QPT_UC ||
420 	    qp->ibqp.qp_type == IB_QPT_RC) {
421 		if (wqe->length > 0x80000000U)
422 			goto bail_inval_free;
423 	} else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
424 				  qp->port_num - 1)->ibmtu)
425 		goto bail_inval_free;
426 	else
427 		atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
428 	wqe->ssn = qp->s_ssn++;
429 	qp->s_head = next;
430 
431 	ret = 0;
432 	goto bail;
433 
434 bail_inval_free:
435 	while (j) {
436 		struct qib_sge *sge = &wqe->sg_list[--j];
437 
438 		atomic_dec(&sge->mr->refcount);
439 	}
440 bail_inval:
441 	ret = -EINVAL;
442 bail:
443 	spin_unlock_irqrestore(&qp->s_lock, flags);
444 	return ret;
445 }
446 
447 /**
448  * qib_post_send - post a send on a QP
449  * @ibqp: the QP to post the send on
450  * @wr: the list of work requests to post
451  * @bad_wr: the first bad WR is put here
452  *
453  * This may be called from interrupt context.
454  */
455 static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
456 			 struct ib_send_wr **bad_wr)
457 {
458 	struct qib_qp *qp = to_iqp(ibqp);
459 	int err = 0;
460 
461 	for (; wr; wr = wr->next) {
462 		err = qib_post_one_send(qp, wr);
463 		if (err) {
464 			*bad_wr = wr;
465 			goto bail;
466 		}
467 	}
468 
469 	/* Try to do the send work in the caller's context. */
470 	qib_do_send(&qp->s_work);
471 
472 bail:
473 	return err;
474 }
475 
476 /**
477  * qib_post_receive - post a receive on a QP
478  * @ibqp: the QP to post the receive on
479  * @wr: the WR to post
480  * @bad_wr: the first bad WR is put here
481  *
482  * This may be called from interrupt context.
483  */
484 static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
485 			    struct ib_recv_wr **bad_wr)
486 {
487 	struct qib_qp *qp = to_iqp(ibqp);
488 	struct qib_rwq *wq = qp->r_rq.wq;
489 	unsigned long flags;
490 	int ret;
491 
492 	/* Check that state is OK to post receive. */
493 	if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
494 		*bad_wr = wr;
495 		ret = -EINVAL;
496 		goto bail;
497 	}
498 
499 	for (; wr; wr = wr->next) {
500 		struct qib_rwqe *wqe;
501 		u32 next;
502 		int i;
503 
504 		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
505 			*bad_wr = wr;
506 			ret = -EINVAL;
507 			goto bail;
508 		}
509 
510 		spin_lock_irqsave(&qp->r_rq.lock, flags);
511 		next = wq->head + 1;
512 		if (next >= qp->r_rq.size)
513 			next = 0;
514 		if (next == wq->tail) {
515 			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
516 			*bad_wr = wr;
517 			ret = -ENOMEM;
518 			goto bail;
519 		}
520 
521 		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
522 		wqe->wr_id = wr->wr_id;
523 		wqe->num_sge = wr->num_sge;
524 		for (i = 0; i < wr->num_sge; i++)
525 			wqe->sg_list[i] = wr->sg_list[i];
526 		/* Make sure queue entry is written before the head index. */
527 		smp_wmb();
528 		wq->head = next;
529 		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
530 	}
531 	ret = 0;
532 
533 bail:
534 	return ret;
535 }
536 
537 /**
538  * qib_qp_rcv - process an incoming packet on a QP
539  * @rcd: the context pointer
540  * @hdr: the packet header
541  * @has_grh: true if the packet has a GRH
542  * @data: the packet data
543  * @tlen: the packet length
544  * @qp: the QP the packet came on
545  *
546  * This is called from qib_ib_rcv() to process an incoming packet
547  * for the given QP.
548  * Called at interrupt level.
549  */
550 static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
551 		       int has_grh, void *data, u32 tlen, struct qib_qp *qp)
552 {
553 	struct qib_ibport *ibp = &rcd->ppd->ibport_data;
554 
555 	spin_lock(&qp->r_lock);
556 
557 	/* Check for valid receive state. */
558 	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
559 		ibp->n_pkt_drops++;
560 		goto unlock;
561 	}
562 
563 	switch (qp->ibqp.qp_type) {
564 	case IB_QPT_SMI:
565 	case IB_QPT_GSI:
566 		if (ib_qib_disable_sma)
567 			break;
568 		/* FALLTHROUGH */
569 	case IB_QPT_UD:
570 		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
571 		break;
572 
573 	case IB_QPT_RC:
574 		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
575 		break;
576 
577 	case IB_QPT_UC:
578 		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
579 		break;
580 
581 	default:
582 		break;
583 	}
584 
585 unlock:
586 	spin_unlock(&qp->r_lock);
587 }
588 
589 /**
590  * qib_ib_rcv - process an incoming packet
591  * @rcd: the context pointer
592  * @rhdr: the header of the packet
593  * @data: the packet payload
594  * @tlen: the packet length
595  *
596  * This is called from qib_kreceive() to process an incoming packet at
597  * interrupt level. Tlen is the length of the header + data + CRC in bytes.
598  */
599 void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
600 {
601 	struct qib_pportdata *ppd = rcd->ppd;
602 	struct qib_ibport *ibp = &ppd->ibport_data;
603 	struct qib_ib_header *hdr = rhdr;
604 	struct qib_other_headers *ohdr;
605 	struct qib_qp *qp;
606 	u32 qp_num;
607 	int lnh;
608 	u8 opcode;
609 	u16 lid;
610 
611 	/* 24 == LRH+BTH+CRC */
612 	if (unlikely(tlen < 24))
613 		goto drop;
614 
615 	/* Check for a valid destination LID (see ch. 7.11.1). */
616 	lid = be16_to_cpu(hdr->lrh[1]);
617 	if (lid < QIB_MULTICAST_LID_BASE) {
618 		lid &= ~((1 << ppd->lmc) - 1);
619 		if (unlikely(lid != ppd->lid))
620 			goto drop;
621 	}
622 
623 	/* Check for GRH */
624 	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
625 	if (lnh == QIB_LRH_BTH)
626 		ohdr = &hdr->u.oth;
627 	else if (lnh == QIB_LRH_GRH) {
628 		u32 vtf;
629 
630 		ohdr = &hdr->u.l.oth;
631 		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
632 			goto drop;
633 		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
634 		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
635 			goto drop;
636 	} else
637 		goto drop;
638 
639 	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
640 	ibp->opstats[opcode & 0x7f].n_bytes += tlen;
641 	ibp->opstats[opcode & 0x7f].n_packets++;
642 
643 	/* Get the destination QP number. */
644 	qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
645 	if (qp_num == QIB_MULTICAST_QPN) {
646 		struct qib_mcast *mcast;
647 		struct qib_mcast_qp *p;
648 
649 		if (lnh != QIB_LRH_GRH)
650 			goto drop;
651 		mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
652 		if (mcast == NULL)
653 			goto drop;
654 		ibp->n_multicast_rcv++;
655 		list_for_each_entry_rcu(p, &mcast->qp_list, list)
656 			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
657 		/*
658 		 * Notify qib_multicast_detach() if it is waiting for us
659 		 * to finish.
660 		 */
661 		if (atomic_dec_return(&mcast->refcount) <= 1)
662 			wake_up(&mcast->wait);
663 	} else {
664 		if (rcd->lookaside_qp) {
665 			if (rcd->lookaside_qpn != qp_num) {
666 				if (atomic_dec_and_test(
667 					&rcd->lookaside_qp->refcount))
668 					wake_up(
669 					 &rcd->lookaside_qp->wait);
670 				rcd->lookaside_qp = NULL;
671 			}
672 		}
673 		if (!rcd->lookaside_qp) {
674 			qp = qib_lookup_qpn(ibp, qp_num);
675 			if (!qp)
676 				goto drop;
677 			rcd->lookaside_qp = qp;
678 			rcd->lookaside_qpn = qp_num;
679 		} else
680 			qp = rcd->lookaside_qp;
681 		ibp->n_unicast_rcv++;
682 		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
683 	}
684 	return;
685 
686 drop:
687 	ibp->n_pkt_drops++;
688 }
689 
690 /*
691  * This is called from a timer to check for QPs
692  * which need kernel memory in order to send a packet.
693  */
694 static void mem_timer(unsigned long data)
695 {
696 	struct qib_ibdev *dev = (struct qib_ibdev *) data;
697 	struct list_head *list = &dev->memwait;
698 	struct qib_qp *qp = NULL;
699 	unsigned long flags;
700 
701 	spin_lock_irqsave(&dev->pending_lock, flags);
702 	if (!list_empty(list)) {
703 		qp = list_entry(list->next, struct qib_qp, iowait);
704 		list_del_init(&qp->iowait);
705 		atomic_inc(&qp->refcount);
706 		if (!list_empty(list))
707 			mod_timer(&dev->mem_timer, jiffies + 1);
708 	}
709 	spin_unlock_irqrestore(&dev->pending_lock, flags);
710 
711 	if (qp) {
712 		spin_lock_irqsave(&qp->s_lock, flags);
713 		if (qp->s_flags & QIB_S_WAIT_KMEM) {
714 			qp->s_flags &= ~QIB_S_WAIT_KMEM;
715 			qib_schedule_send(qp);
716 		}
717 		spin_unlock_irqrestore(&qp->s_lock, flags);
718 		if (atomic_dec_and_test(&qp->refcount))
719 			wake_up(&qp->wait);
720 	}
721 }
722 
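/*
 * Advance the SGE state by @length bytes, stepping to the next SGE in
 * the list (or the next segment of the memory region) when the current
 * one is fully consumed.
 */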
723 static void update_sge(struct qib_sge_state *ss, u32 length)
724 {
725 	struct qib_sge *sge = &ss->sge;
726 
727 	sge->vaddr += length;
728 	sge->length -= length;
729 	sge->sge_length -= length;
730 	if (sge->sge_length == 0) {
731 		if (--ss->num_sge)
732 			*sge = *ss->sg_list++;
733 	} else if (sge->length == 0 && sge->mr->lkey) {
734 		if (++sge->n >= QIB_SEGSZ) {
735 			if (++sge->m >= sge->mr->mapsz)
736 				return;
737 			sge->n = 0;
738 		}
739 		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
740 		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
741 	}
742 }
743 
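/*
 * Endian-dependent helpers used by copy_io() to assemble aligned 32-bit
 * words from byte-misaligned source data before they are written to the
 * PIO buffer.
 */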
744 #ifdef __LITTLE_ENDIAN
745 static inline u32 get_upper_bits(u32 data, u32 shift)
746 {
747 	return data >> shift;
748 }
749 
750 static inline u32 set_upper_bits(u32 data, u32 shift)
751 {
752 	return data << shift;
753 }
754 
755 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
756 {
757 	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
758 	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
759 	return data;
760 }
761 #else
762 static inline u32 get_upper_bits(u32 data, u32 shift)
763 {
764 	return data << shift;
765 }
766 
767 static inline u32 set_upper_bits(u32 data, u32 shift)
768 {
769 	return data >> shift;
770 }
771 
772 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
773 {
774 	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
775 	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
776 	return data;
777 }
778 #endif
779 
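/*
 * Copy the payload described by @ss into a PIO send buffer one dword at
 * a time, handling source buffers that are not 32-bit aligned.  The last
 * dword (the trigger word) is written separately, with write-combining
 * flushes when @flush_wc is set.
 */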
780 static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
781 		    u32 length, unsigned flush_wc)
782 {
783 	u32 extra = 0;
784 	u32 data = 0;
785 	u32 last;
786 
787 	while (1) {
788 		u32 len = ss->sge.length;
789 		u32 off;
790 
791 		if (len > length)
792 			len = length;
793 		if (len > ss->sge.sge_length)
794 			len = ss->sge.sge_length;
795 		BUG_ON(len == 0);
796 		/* If the source address is not aligned, try to align it. */
797 		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
798 		if (off) {
799 			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
800 					    ~(sizeof(u32) - 1));
801 			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
802 			u32 y;
803 
804 			y = sizeof(u32) - off;
805 			if (len > y)
806 				len = y;
807 			if (len + extra >= sizeof(u32)) {
808 				data |= set_upper_bits(v, extra *
809 						       BITS_PER_BYTE);
810 				len = sizeof(u32) - extra;
811 				if (len == length) {
812 					last = data;
813 					break;
814 				}
815 				__raw_writel(data, piobuf);
816 				piobuf++;
817 				extra = 0;
818 				data = 0;
819 			} else {
820 				/* Clear unused upper bytes */
821 				data |= clear_upper_bytes(v, len, extra);
822 				if (len == length) {
823 					last = data;
824 					break;
825 				}
826 				extra += len;
827 			}
828 		} else if (extra) {
829 			/* Source address is aligned. */
830 			u32 *addr = (u32 *) ss->sge.vaddr;
831 			int shift = extra * BITS_PER_BYTE;
832 			int ushift = 32 - shift;
833 			u32 l = len;
834 
835 			while (l >= sizeof(u32)) {
836 				u32 v = *addr;
837 
838 				data |= set_upper_bits(v, shift);
839 				__raw_writel(data, piobuf);
840 				data = get_upper_bits(v, ushift);
841 				piobuf++;
842 				addr++;
843 				l -= sizeof(u32);
844 			}
845 			/*
846 			 * We still have 'extra' number of bytes leftover.
847 			 */
848 			if (l) {
849 				u32 v = *addr;
850 
851 				if (l + extra >= sizeof(u32)) {
852 					data |= set_upper_bits(v, shift);
853 					len -= l + extra - sizeof(u32);
854 					if (len == length) {
855 						last = data;
856 						break;
857 					}
858 					__raw_writel(data, piobuf);
859 					piobuf++;
860 					extra = 0;
861 					data = 0;
862 				} else {
863 					/* Clear unused upper bytes */
864 					data |= clear_upper_bytes(v, l, extra);
865 					if (len == length) {
866 						last = data;
867 						break;
868 					}
869 					extra += l;
870 				}
871 			} else if (len == length) {
872 				last = data;
873 				break;
874 			}
875 		} else if (len == length) {
876 			u32 w;
877 
878 			/*
879 			 * Need to round up for the last dword in the
880 			 * packet.
881 			 */
882 			w = (len + 3) >> 2;
883 			qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
884 			piobuf += w - 1;
885 			last = ((u32 *) ss->sge.vaddr)[w - 1];
886 			break;
887 		} else {
888 			u32 w = len >> 2;
889 
890 			qib_pio_copy(piobuf, ss->sge.vaddr, w);
891 			piobuf += w;
892 
893 			extra = len & (sizeof(u32) - 1);
894 			if (extra) {
895 				u32 v = ((u32 *) ss->sge.vaddr)[w];
896 
897 				/* Clear unused upper bytes */
898 				data = clear_upper_bytes(v, extra, 0);
899 			}
900 		}
901 		update_sge(ss, len);
902 		length -= len;
903 	}
904 	/* Update address before sending packet. */
905 	update_sge(ss, length);
906 	if (flush_wc) {
907 		/* must flush early everything before trigger word */
908 		qib_flush_wc();
909 		__raw_writel(last, piobuf);
910 		/* be sure trigger word is written */
911 		qib_flush_wc();
912 	} else
913 		__raw_writel(last, piobuf);
914 }
915 
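/*
 * Slow path for get_txreq(): retry the allocation while holding
 * qp->s_lock so the QP can be queued on dev->txwait if the free list is
 * still empty.  Returns ERR_PTR(-EBUSY) when the caller must wait.
 */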
916 static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
917 					   struct qib_qp *qp)
918 {
919 	struct qib_verbs_txreq *tx;
920 	unsigned long flags;
921 
922 	spin_lock_irqsave(&qp->s_lock, flags);
923 	spin_lock(&dev->pending_lock);
924 
925 	if (!list_empty(&dev->txreq_free)) {
926 		struct list_head *l = dev->txreq_free.next;
927 
928 		list_del(l);
929 		spin_unlock(&dev->pending_lock);
930 		spin_unlock_irqrestore(&qp->s_lock, flags);
931 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
932 	} else {
933 		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
934 		    list_empty(&qp->iowait)) {
935 			dev->n_txwait++;
936 			qp->s_flags |= QIB_S_WAIT_TX;
937 			list_add_tail(&qp->iowait, &dev->txwait);
938 		}
939 		qp->s_flags &= ~QIB_S_BUSY;
940 		spin_unlock(&dev->pending_lock);
941 		spin_unlock_irqrestore(&qp->s_lock, flags);
942 		tx = ERR_PTR(-EBUSY);
943 	}
944 	return tx;
945 }
946 
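/*
 * Get a send DMA txreq from the free list, falling back to the slow
 * path above when the list is empty.
 */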
947 static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
948 					 struct qib_qp *qp)
949 {
950 	struct qib_verbs_txreq *tx;
951 	unsigned long flags;
952 
953 	spin_lock_irqsave(&dev->pending_lock, flags);
954 	/* assume the free list is non-empty */
955 	if (likely(!list_empty(&dev->txreq_free))) {
956 		struct list_head *l = dev->txreq_free.next;
957 
958 		list_del(l);
959 		spin_unlock_irqrestore(&dev->pending_lock, flags);
960 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
961 	} else {
962 		/* call slow path to get the extra lock */
963 		spin_unlock_irqrestore(&dev->pending_lock, flags);
964 		tx =  __get_txreq(dev, qp);
965 	}
966 	return tx;
967 }
968 
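/*
 * Return a txreq to the free list, dropping the QP and MR references
 * and freeing any DMA bounce buffer.  Wake the first QP waiting for a
 * free txreq, if any.
 */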
969 void qib_put_txreq(struct qib_verbs_txreq *tx)
970 {
971 	struct qib_ibdev *dev;
972 	struct qib_qp *qp;
973 	unsigned long flags;
974 
975 	qp = tx->qp;
976 	dev = to_idev(qp->ibqp.device);
977 
978 	if (atomic_dec_and_test(&qp->refcount))
979 		wake_up(&qp->wait);
980 	if (tx->mr) {
981 		atomic_dec(&tx->mr->refcount);
982 		tx->mr = NULL;
983 	}
984 	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
985 		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
986 		dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
987 				 tx->txreq.addr, tx->hdr_dwords << 2,
988 				 DMA_TO_DEVICE);
989 		kfree(tx->align_buf);
990 	}
991 
992 	spin_lock_irqsave(&dev->pending_lock, flags);
993 
994 	/* Put struct back on free list */
995 	list_add(&tx->txreq.list, &dev->txreq_free);
996 
997 	if (!list_empty(&dev->txwait)) {
998 		/* Wake up first QP wanting a free struct */
999 		qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
1000 		list_del_init(&qp->iowait);
1001 		atomic_inc(&qp->refcount);
1002 		spin_unlock_irqrestore(&dev->pending_lock, flags);
1003 
1004 		spin_lock_irqsave(&qp->s_lock, flags);
1005 		if (qp->s_flags & QIB_S_WAIT_TX) {
1006 			qp->s_flags &= ~QIB_S_WAIT_TX;
1007 			qib_schedule_send(qp);
1008 		}
1009 		spin_unlock_irqrestore(&qp->s_lock, flags);
1010 
1011 		if (atomic_dec_and_test(&qp->refcount))
1012 			wake_up(&qp->wait);
1013 	} else
1014 		spin_unlock_irqrestore(&dev->pending_lock, flags);
1015 }
1016 
1017 /*
1018  * This is called when there are send DMA descriptors that might be
1019  * available.
1020  *
1021  * This is called with ppd->sdma_lock held.
1022  */
1023 void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
1024 {
1025 	struct qib_qp *qp, *nqp;
1026 	struct qib_qp *qps[20];
1027 	struct qib_ibdev *dev;
1028 	unsigned i, n;
1029 
1030 	n = 0;
1031 	dev = &ppd->dd->verbs_dev;
1032 	spin_lock(&dev->pending_lock);
1033 
1034 	/* Search wait list for first QP wanting DMA descriptors. */
1035 	list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
1036 		if (qp->port_num != ppd->port)
1037 			continue;
1038 		if (n == ARRAY_SIZE(qps))
1039 			break;
1040 		if (qp->s_tx->txreq.sg_count > avail)
1041 			break;
1042 		avail -= qp->s_tx->txreq.sg_count;
1043 		list_del_init(&qp->iowait);
1044 		atomic_inc(&qp->refcount);
1045 		qps[n++] = qp;
1046 	}
1047 
1048 	spin_unlock(&dev->pending_lock);
1049 
1050 	for (i = 0; i < n; i++) {
1051 		qp = qps[i];
1052 		spin_lock(&qp->s_lock);
1053 		if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
1054 			qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
1055 			qib_schedule_send(qp);
1056 		}
1057 		spin_unlock(&qp->s_lock);
1058 		if (atomic_dec_and_test(&qp->refcount))
1059 			wake_up(&qp->wait);
1060 	}
1061 }
1062 
1063 /*
1064  * This is called with ppd->sdma_lock held.
1065  */
1066 static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
1067 {
1068 	struct qib_verbs_txreq *tx =
1069 		container_of(cookie, struct qib_verbs_txreq, txreq);
1070 	struct qib_qp *qp = tx->qp;
1071 
1072 	spin_lock(&qp->s_lock);
1073 	if (tx->wqe)
1074 		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
1075 	else if (qp->ibqp.qp_type == IB_QPT_RC) {
1076 		struct qib_ib_header *hdr;
1077 
1078 		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
1079 			hdr = &tx->align_buf->hdr;
1080 		else {
1081 			struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1082 
1083 			hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
1084 		}
1085 		qib_rc_send_complete(qp, hdr);
1086 	}
1087 	if (atomic_dec_and_test(&qp->s_dma_busy)) {
1088 		if (qp->state == IB_QPS_RESET)
1089 			wake_up(&qp->wait_dma);
1090 		else if (qp->s_flags & QIB_S_WAIT_DMA) {
1091 			qp->s_flags &= ~QIB_S_WAIT_DMA;
1092 			qib_schedule_send(qp);
1093 		}
1094 	}
1095 	spin_unlock(&qp->s_lock);
1096 
1097 	qib_put_txreq(tx);
1098 }
1099 
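/*
 * No kernel memory was available for a send: queue the QP on
 * dev->memwait (arming mem_timer) and return -EBUSY so the request is
 * retried later.  If the QP is no longer in a state that allows send
 * processing (e.g. the error state), return zero so the send work
 * request is flushed instead.
 */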
1100 static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
1101 {
1102 	unsigned long flags;
1103 	int ret = 0;
1104 
1105 	spin_lock_irqsave(&qp->s_lock, flags);
1106 	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1107 		spin_lock(&dev->pending_lock);
1108 		if (list_empty(&qp->iowait)) {
1109 			if (list_empty(&dev->memwait))
1110 				mod_timer(&dev->mem_timer, jiffies + 1);
1111 			qp->s_flags |= QIB_S_WAIT_KMEM;
1112 			list_add_tail(&qp->iowait, &dev->memwait);
1113 		}
1114 		spin_unlock(&dev->pending_lock);
1115 		qp->s_flags &= ~QIB_S_BUSY;
1116 		ret = -EBUSY;
1117 	}
1118 	spin_unlock_irqrestore(&qp->s_lock, flags);
1119 
1120 	return ret;
1121 }
1122 
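/*
 * Send a packet using the send DMA engine.  The header (and, when the
 * payload cannot be described directly by SDMA descriptors, the payload
 * as well) is copied into a DMA-mapped buffer and handed to
 * qib_sdma_verbs_send().  On resource exhaustion the QP is queued to be
 * rescheduled when a txreq or kernel memory becomes available.
 */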
1123 static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
1124 			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
1125 			      u32 plen, u32 dwords)
1126 {
1127 	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1128 	struct qib_devdata *dd = dd_from_dev(dev);
1129 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1130 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1131 	struct qib_verbs_txreq *tx;
1132 	struct qib_pio_header *phdr;
1133 	u32 control;
1134 	u32 ndesc;
1135 	int ret;
1136 
1137 	tx = qp->s_tx;
1138 	if (tx) {
1139 		qp->s_tx = NULL;
1140 		/* resend previously constructed packet */
1141 		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
1142 		goto bail;
1143 	}
1144 
1145 	tx = get_txreq(dev, qp);
1146 	if (IS_ERR(tx))
1147 		goto bail_tx;
1148 
1149 	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1150 				       be16_to_cpu(hdr->lrh[0]) >> 12);
1151 	tx->qp = qp;
1152 	atomic_inc(&qp->refcount);
1153 	tx->wqe = qp->s_wqe;
1154 	tx->mr = qp->s_rdma_mr;
1155 	if (qp->s_rdma_mr)
1156 		qp->s_rdma_mr = NULL;
1157 	tx->txreq.callback = sdma_complete;
1158 	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
1159 		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
1160 	else
1161 		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
1162 	if (plen + 1 > dd->piosize2kmax_dwords)
1163 		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
1164 
1165 	if (len) {
1166 		/*
1167 		 * Don't try to DMA if it takes more descriptors than
1168 		 * the queue holds.
1169 		 */
1170 		ndesc = qib_count_sge(ss, len);
1171 		if (ndesc >= ppd->sdma_descq_cnt)
1172 			ndesc = 0;
1173 	} else
1174 		ndesc = 1;
1175 	if (ndesc) {
1176 		phdr = &dev->pio_hdrs[tx->hdr_inx];
1177 		phdr->pbc[0] = cpu_to_le32(plen);
1178 		phdr->pbc[1] = cpu_to_le32(control);
1179 		memcpy(&phdr->hdr, hdr, hdrwords << 2);
1180 		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
1181 		tx->txreq.sg_count = ndesc;
1182 		tx->txreq.addr = dev->pio_hdrs_phys +
1183 			tx->hdr_inx * sizeof(struct qib_pio_header);
1184 		tx->hdr_dwords = hdrwords + 2; /* add PBC length */
1185 		ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
1186 		goto bail;
1187 	}
1188 
1189 	/* Allocate a buffer and copy the header and payload to it. */
1190 	tx->hdr_dwords = plen + 1;
1191 	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
1192 	if (!phdr)
1193 		goto err_tx;
1194 	phdr->pbc[0] = cpu_to_le32(plen);
1195 	phdr->pbc[1] = cpu_to_le32(control);
1196 	memcpy(&phdr->hdr, hdr, hdrwords << 2);
1197 	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
1198 
1199 	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
1200 					tx->hdr_dwords << 2, DMA_TO_DEVICE);
1201 	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
1202 		goto map_err;
1203 	tx->align_buf = phdr;
1204 	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
1205 	tx->txreq.sg_count = 1;
1206 	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
1207 	goto unaligned;
1208 
1209 map_err:
1210 	kfree(phdr);
1211 err_tx:
1212 	qib_put_txreq(tx);
1213 	ret = wait_kmem(dev, qp);
1214 unaligned:
1215 	ibp->n_unaligned++;
1216 bail:
1217 	return ret;
1218 bail_tx:
1219 	ret = PTR_ERR(tx);
1220 	goto bail;
1221 }
1222 
1223 /*
1224  * If we are now in the error state, return zero to flush the
1225  * send work request.
1226  */
1227 static int no_bufs_available(struct qib_qp *qp)
1228 {
1229 	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1230 	struct qib_devdata *dd;
1231 	unsigned long flags;
1232 	int ret = 0;
1233 
1234 	/*
1235 	 * Note that as soon as want_buffer() is called and
1236 	 * possibly before it returns, qib_ib_piobufavail()
1237 	 * could be called. Therefore, put QP on the I/O wait list before
1238 	 * enabling the PIO avail interrupt.
1239 	 */
1240 	spin_lock_irqsave(&qp->s_lock, flags);
1241 	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1242 		spin_lock(&dev->pending_lock);
1243 		if (list_empty(&qp->iowait)) {
1244 			dev->n_piowait++;
1245 			qp->s_flags |= QIB_S_WAIT_PIO;
1246 			list_add_tail(&qp->iowait, &dev->piowait);
1247 			dd = dd_from_dev(dev);
1248 			dd->f_wantpiobuf_intr(dd, 1);
1249 		}
1250 		spin_unlock(&dev->pending_lock);
1251 		qp->s_flags &= ~QIB_S_BUSY;
1252 		ret = -EBUSY;
1253 	}
1254 	spin_unlock_irqrestore(&qp->s_lock, flags);
1255 	return ret;
1256 }
1257 
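/*
 * Send a packet by programmed I/O: write the PBC, header and payload
 * into a chip send buffer, observing the write-combining ordering rules
 * the hardware requires, then generate the send completion.  If no send
 * buffer is available the QP is queued via no_bufs_available().
 */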
1258 static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
1259 			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
1260 			      u32 plen, u32 dwords)
1261 {
1262 	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1263 	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
1264 	u32 *hdr = (u32 *) ibhdr;
1265 	u32 __iomem *piobuf_orig;
1266 	u32 __iomem *piobuf;
1267 	u64 pbc;
1268 	unsigned long flags;
1269 	unsigned flush_wc;
1270 	u32 control;
1271 	u32 pbufn;
1272 
1273 	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1274 		be16_to_cpu(ibhdr->lrh[0]) >> 12);
1275 	pbc = ((u64) control << 32) | plen;
1276 	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
1277 	if (unlikely(piobuf == NULL))
1278 		return no_bufs_available(qp);
1279 
1280 	/*
1281 	 * Write the pbc.
1282 	 * We have to flush after the PBC for correctness on some cpus
1283 	 * or WC buffer can be written out of order.
1284 	 */
1285 	writeq(pbc, piobuf);
1286 	piobuf_orig = piobuf;
1287 	piobuf += 2;
1288 
1289 	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
1290 	if (len == 0) {
1291 		/*
1292 		 * If there is just the header portion, must flush before
1293 		 * writing last word of header for correctness, and after
1294 		 * the last header word (trigger word).
1295 		 */
1296 		if (flush_wc) {
1297 			qib_flush_wc();
1298 			qib_pio_copy(piobuf, hdr, hdrwords - 1);
1299 			qib_flush_wc();
1300 			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
1301 			qib_flush_wc();
1302 		} else
1303 			qib_pio_copy(piobuf, hdr, hdrwords);
1304 		goto done;
1305 	}
1306 
1307 	if (flush_wc)
1308 		qib_flush_wc();
1309 	qib_pio_copy(piobuf, hdr, hdrwords);
1310 	piobuf += hdrwords;
1311 
1312 	/* The common case is aligned and contained in one segment. */
1313 	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
1314 		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
1315 		u32 *addr = (u32 *) ss->sge.vaddr;
1316 
1317 		/* Update address before sending packet. */
1318 		update_sge(ss, len);
1319 		if (flush_wc) {
1320 			qib_pio_copy(piobuf, addr, dwords - 1);
1321 			/* must flush early everything before trigger word */
1322 			qib_flush_wc();
1323 			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
1324 			/* be sure trigger word is written */
1325 			qib_flush_wc();
1326 		} else
1327 			qib_pio_copy(piobuf, addr, dwords);
1328 		goto done;
1329 	}
1330 	copy_io(piobuf, ss, len, flush_wc);
1331 done:
1332 	if (dd->flags & QIB_USE_SPCL_TRIG) {
1333 		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
1334 		qib_flush_wc();
1335 		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
1336 	}
1337 	qib_sendbuf_done(dd, pbufn);
1338 	if (qp->s_rdma_mr) {
1339 		atomic_dec(&qp->s_rdma_mr->refcount);
1340 		qp->s_rdma_mr = NULL;
1341 	}
1342 	if (qp->s_wqe) {
1343 		spin_lock_irqsave(&qp->s_lock, flags);
1344 		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
1345 		spin_unlock_irqrestore(&qp->s_lock, flags);
1346 	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
1347 		spin_lock_irqsave(&qp->s_lock, flags);
1348 		qib_rc_send_complete(qp, ibhdr);
1349 		spin_unlock_irqrestore(&qp->s_lock, flags);
1350 	}
1351 	return 0;
1352 }
1353 
1354 /**
1355  * qib_verbs_send - send a packet
1356  * @qp: the QP to send on
1357  * @hdr: the packet header
1358  * @hdrwords: the number of 32-bit words in the header
1359  * @ss: the SGE to send
1360  * @len: the length of the packet in bytes
1361  *
1362  * Return zero if packet is sent or queued OK.
1363  * Otherwise return non-zero and clear the QIB_S_BUSY flag in qp->s_flags.
1364  */
1365 int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
1366 		   u32 hdrwords, struct qib_sge_state *ss, u32 len)
1367 {
1368 	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1369 	u32 plen;
1370 	int ret;
1371 	u32 dwords = (len + 3) >> 2;
1372 
1373 	/*
1374 	 * Calculate the send buffer trigger address.
1375 	 * The +1 counts for the pbc control dword following the pbc length.
1376 	 */
1377 	plen = hdrwords + dwords + 1;
1378 
1379 	/*
1380 	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
1381 	 * can defer SDMA restart until link goes ACTIVE without
1382 	 * worrying about just how we got there.
1383 	 */
1384 	if (qp->ibqp.qp_type == IB_QPT_SMI ||
1385 	    !(dd->flags & QIB_HAS_SEND_DMA))
1386 		ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1387 					 plen, dwords);
1388 	else
1389 		ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
1390 					 plen, dwords);
1391 
1392 	return ret;
1393 }
1394 
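/**
 * qib_snapshot_counters - snapshot the basic send/receive counters
 * @ppd: the physical port
 * @swords: words sent
 * @rwords: words received
 * @spkts: packets sent
 * @rpkts: packets received
 * @xmit_wait: number of send stalls
 *
 * Returns -EINVAL if the hardware is not present (e.g. frozen).
 */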
1395 int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
1396 			  u64 *rwords, u64 *spkts, u64 *rpkts,
1397 			  u64 *xmit_wait)
1398 {
1399 	int ret;
1400 	struct qib_devdata *dd = ppd->dd;
1401 
1402 	if (!(dd->flags & QIB_PRESENT)) {
1403 		/* no hardware, freeze, etc. */
1404 		ret = -EINVAL;
1405 		goto bail;
1406 	}
1407 	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
1408 	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
1409 	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
1410 	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
1411 	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
1412 
1413 	ret = 0;
1414 
1415 bail:
1416 	return ret;
1417 }
1418 
1419 /**
1420  * qib_get_counters - get various chip counters
1421  * @ppd: the physical port of the qlogic_ib device
1422  * @cntrs: counters are placed here
1423  *
1424  * Return the counters needed by recv_pma_get_portcounters().
1425  */
1426 int qib_get_counters(struct qib_pportdata *ppd,
1427 		     struct qib_verbs_counters *cntrs)
1428 {
1429 	int ret;
1430 
1431 	if (!(ppd->dd->flags & QIB_PRESENT)) {
1432 		/* no hardware, freeze, etc. */
1433 		ret = -EINVAL;
1434 		goto bail;
1435 	}
1436 	cntrs->symbol_error_counter =
1437 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
1438 	cntrs->link_error_recovery_counter =
1439 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
1440 	/*
1441 	 * The link downed counter counts when the other side downs the
1442 	 * connection.  We add in the number of times we downed the link
1443 	 * due to local link integrity errors to compensate.
1444 	 */
1445 	cntrs->link_downed_counter =
1446 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
1447 	cntrs->port_rcv_errors =
1448 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
1449 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
1450 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
1451 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
1452 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
1453 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
1454 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
1455 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
1456 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
1457 	cntrs->port_rcv_errors +=
1458 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
1459 	cntrs->port_rcv_errors +=
1460 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
1461 	cntrs->port_rcv_remphys_errors =
1462 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
1463 	cntrs->port_xmit_discards =
1464 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
1465 	cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
1466 			QIBPORTCNTR_WORDSEND);
1467 	cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
1468 			QIBPORTCNTR_WORDRCV);
1469 	cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
1470 			QIBPORTCNTR_PKTSEND);
1471 	cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
1472 			QIBPORTCNTR_PKTRCV);
1473 	cntrs->local_link_integrity_errors =
1474 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
1475 	cntrs->excessive_buffer_overrun_errors =
1476 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
1477 	cntrs->vl15_dropped =
1478 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
1479 
1480 	ret = 0;
1481 
1482 bail:
1483 	return ret;
1484 }
1485 
1486 /**
1487  * qib_ib_piobufavail - callback when a PIO buffer is available
1488  * @dd: the device pointer
1489  *
1490  * This is called from qib_intr() at interrupt level when a PIO buffer is
1491  * available after qib_verbs_send() returned an error that no buffers were
1492  * available. Disable the interrupt if there are no more QPs waiting.
1493  */
1494 void qib_ib_piobufavail(struct qib_devdata *dd)
1495 {
1496 	struct qib_ibdev *dev = &dd->verbs_dev;
1497 	struct list_head *list;
1498 	struct qib_qp *qps[5];
1499 	struct qib_qp *qp;
1500 	unsigned long flags;
1501 	unsigned i, n;
1502 
1503 	list = &dev->piowait;
1504 	n = 0;
1505 
1506 	/*
1507 	 * Note: checking that the piowait list is empty and clearing
1508 	 * the buffer available interrupt needs to be atomic or we
1509 	 * could end up with QPs on the wait list with the interrupt
1510 	 * disabled.
1511 	 */
1512 	spin_lock_irqsave(&dev->pending_lock, flags);
1513 	while (!list_empty(list)) {
1514 		if (n == ARRAY_SIZE(qps))
1515 			goto full;
1516 		qp = list_entry(list->next, struct qib_qp, iowait);
1517 		list_del_init(&qp->iowait);
1518 		atomic_inc(&qp->refcount);
1519 		qps[n++] = qp;
1520 	}
1521 	dd->f_wantpiobuf_intr(dd, 0);
1522 full:
1523 	spin_unlock_irqrestore(&dev->pending_lock, flags);
1524 
1525 	for (i = 0; i < n; i++) {
1526 		qp = qps[i];
1527 
1528 		spin_lock_irqsave(&qp->s_lock, flags);
1529 		if (qp->s_flags & QIB_S_WAIT_PIO) {
1530 			qp->s_flags &= ~QIB_S_WAIT_PIO;
1531 			qib_schedule_send(qp);
1532 		}
1533 		spin_unlock_irqrestore(&qp->s_lock, flags);
1534 
1535 		/* Notify qib_destroy_qp() if it is waiting. */
1536 		if (atomic_dec_and_test(&qp->refcount))
1537 			wake_up(&qp->wait);
1538 	}
1539 }
1540 
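/*
 * Report the device attributes and limits (largely derived from the
 * module parameters above) to the verbs midlayer.
 */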
1541 static int qib_query_device(struct ib_device *ibdev,
1542 			    struct ib_device_attr *props)
1543 {
1544 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
1545 	struct qib_ibdev *dev = to_idev(ibdev);
1546 
1547 	memset(props, 0, sizeof(*props));
1548 
1549 	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1550 		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1551 		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1552 		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1553 	props->page_size_cap = PAGE_SIZE;
1554 	props->vendor_id =
1555 		QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
1556 	props->vendor_part_id = dd->deviceid;
1557 	props->hw_ver = dd->minrev;
1558 	props->sys_image_guid = ib_qib_sys_image_guid;
1559 	props->max_mr_size = ~0ULL;
1560 	props->max_qp = ib_qib_max_qps;
1561 	props->max_qp_wr = ib_qib_max_qp_wrs;
1562 	props->max_sge = ib_qib_max_sges;
1563 	props->max_cq = ib_qib_max_cqs;
1564 	props->max_ah = ib_qib_max_ahs;
1565 	props->max_cqe = ib_qib_max_cqes;
1566 	props->max_mr = dev->lk_table.max;
1567 	props->max_fmr = dev->lk_table.max;
1568 	props->max_map_per_fmr = 32767;
1569 	props->max_pd = ib_qib_max_pds;
1570 	props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1571 	props->max_qp_init_rd_atom = 255;
1572 	/* props->max_res_rd_atom */
1573 	props->max_srq = ib_qib_max_srqs;
1574 	props->max_srq_wr = ib_qib_max_srq_wrs;
1575 	props->max_srq_sge = ib_qib_max_srq_sges;
1576 	/* props->local_ca_ack_delay */
1577 	props->atomic_cap = IB_ATOMIC_GLOB;
1578 	props->max_pkeys = qib_get_npkeys(dd);
1579 	props->max_mcast_grp = ib_qib_max_mcast_grps;
1580 	props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
1581 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1582 		props->max_mcast_grp;
1583 
1584 	return 0;
1585 }
1586 
1587 static int qib_query_port(struct ib_device *ibdev, u8 port,
1588 			  struct ib_port_attr *props)
1589 {
1590 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
1591 	struct qib_ibport *ibp = to_iport(ibdev, port);
1592 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1593 	enum ib_mtu mtu;
1594 	u16 lid = ppd->lid;
1595 
1596 	memset(props, 0, sizeof(*props));
1597 	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1598 	props->lmc = ppd->lmc;
1599 	props->sm_lid = ibp->sm_lid;
1600 	props->sm_sl = ibp->sm_sl;
1601 	props->state = dd->f_iblink_state(ppd->lastibcstat);
1602 	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
1603 	props->port_cap_flags = ibp->port_cap_flags;
1604 	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
1605 	props->max_msg_sz = 0x80000000;
1606 	props->pkey_tbl_len = qib_get_npkeys(dd);
1607 	props->bad_pkey_cntr = ibp->pkey_violations;
1608 	props->qkey_viol_cntr = ibp->qkey_violations;
1609 	props->active_width = ppd->link_width_active;
1610 	/* See rate_show() */
1611 	props->active_speed = ppd->link_speed_active;
1612 	props->max_vl_num = qib_num_vls(ppd->vls_supported);
1613 	props->init_type_reply = 0;
1614 
1615 	props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
1616 	switch (ppd->ibmtu) {
1617 	case 4096:
1618 		mtu = IB_MTU_4096;
1619 		break;
1620 	case 2048:
1621 		mtu = IB_MTU_2048;
1622 		break;
1623 	case 1024:
1624 		mtu = IB_MTU_1024;
1625 		break;
1626 	case 512:
1627 		mtu = IB_MTU_512;
1628 		break;
1629 	case 256:
1630 		mtu = IB_MTU_256;
1631 		break;
1632 	default:
1633 		mtu = IB_MTU_2048;
1634 	}
1635 	props->active_mtu = mtu;
1636 	props->subnet_timeout = ibp->subnet_timeout;
1637 
1638 	return 0;
1639 }
1640 
1641 static int qib_modify_device(struct ib_device *device,
1642 			     int device_modify_mask,
1643 			     struct ib_device_modify *device_modify)
1644 {
1645 	struct qib_devdata *dd = dd_from_ibdev(device);
1646 	unsigned i;
1647 	int ret;
1648 
1649 	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1650 				   IB_DEVICE_MODIFY_NODE_DESC)) {
1651 		ret = -EOPNOTSUPP;
1652 		goto bail;
1653 	}
1654 
1655 	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1656 		memcpy(device->node_desc, device_modify->node_desc, 64);
1657 		for (i = 0; i < dd->num_pports; i++) {
1658 			struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1659 
1660 			qib_node_desc_chg(ibp);
1661 		}
1662 	}
1663 
1664 	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1665 		ib_qib_sys_image_guid =
1666 			cpu_to_be64(device_modify->sys_image_guid);
1667 		for (i = 0; i < dd->num_pports; i++) {
1668 			struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1669 
1670 			qib_sys_guid_chg(ibp);
1671 		}
1672 	}
1673 
1674 	ret = 0;
1675 
1676 bail:
1677 	return ret;
1678 }
1679 
1680 static int qib_modify_port(struct ib_device *ibdev, u8 port,
1681 			   int port_modify_mask, struct ib_port_modify *props)
1682 {
1683 	struct qib_ibport *ibp = to_iport(ibdev, port);
1684 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1685 
1686 	ibp->port_cap_flags |= props->set_port_cap_mask;
1687 	ibp->port_cap_flags &= ~props->clr_port_cap_mask;
1688 	if (props->set_port_cap_mask || props->clr_port_cap_mask)
1689 		qib_cap_mask_chg(ibp);
1690 	if (port_modify_mask & IB_PORT_SHUTDOWN)
1691 		qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1692 	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1693 		ibp->qkey_violations = 0;
1694 	return 0;
1695 }
1696 
1697 static int qib_query_gid(struct ib_device *ibdev, u8 port,
1698 			 int index, union ib_gid *gid)
1699 {
1700 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
1701 	int ret = 0;
1702 
1703 	if (!port || port > dd->num_pports)
1704 		ret = -EINVAL;
1705 	else {
1706 		struct qib_ibport *ibp = to_iport(ibdev, port);
1707 		struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1708 
1709 		gid->global.subnet_prefix = ibp->gid_prefix;
1710 		if (index == 0)
1711 			gid->global.interface_id = ppd->guid;
1712 		else if (index < QIB_GUIDS_PER_PORT)
1713 			gid->global.interface_id = ibp->guids[index - 1];
1714 		else
1715 			ret = -EINVAL;
1716 	}
1717 
1718 	return ret;
1719 }
1720 
1721 static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
1722 				  struct ib_ucontext *context,
1723 				  struct ib_udata *udata)
1724 {
1725 	struct qib_ibdev *dev = to_idev(ibdev);
1726 	struct qib_pd *pd;
1727 	struct ib_pd *ret;
1728 
1729 	/*
1730 	 * This is actually totally arbitrary.  Some correctness tests
1731 	 * assume there's a maximum number of PDs that can be allocated.
1732 	 * We don't actually have this limit, but we fail the test if
1733 	 * we allow allocations of more than we report for this value.
1734 	 */
1735 
1736 	pd = kmalloc(sizeof *pd, GFP_KERNEL);
1737 	if (!pd) {
1738 		ret = ERR_PTR(-ENOMEM);
1739 		goto bail;
1740 	}
1741 
1742 	spin_lock(&dev->n_pds_lock);
1743 	if (dev->n_pds_allocated == ib_qib_max_pds) {
1744 		spin_unlock(&dev->n_pds_lock);
1745 		kfree(pd);
1746 		ret = ERR_PTR(-ENOMEM);
1747 		goto bail;
1748 	}
1749 
1750 	dev->n_pds_allocated++;
1751 	spin_unlock(&dev->n_pds_lock);
1752 
1753 	/* ib_alloc_pd() will initialize pd->ibpd. */
1754 	pd->user = udata != NULL;
1755 
1756 	ret = &pd->ibpd;
1757 
1758 bail:
1759 	return ret;
1760 }
1761 
1762 static int qib_dealloc_pd(struct ib_pd *ibpd)
1763 {
1764 	struct qib_pd *pd = to_ipd(ibpd);
1765 	struct qib_ibdev *dev = to_idev(ibpd->device);
1766 
1767 	spin_lock(&dev->n_pds_lock);
1768 	dev->n_pds_allocated--;
1769 	spin_unlock(&dev->n_pds_lock);
1770 
1771 	kfree(pd);
1772 
1773 	return 0;
1774 }
1775 
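/**
 * qib_check_ah - validate address handle attributes
 * @ibdev: the infiniband device
 * @ah_attr: the attributes to check
 *
 * Returns 0 if the DLID, SL, port number, static rate and GRH
 * requirements are all satisfied, otherwise -EINVAL.
 */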
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	/* A multicast address requires a GRH (see ch. 8.4.1). */
	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
	    ah_attr->dlid != QIB_PERMISSIVE_LID &&
	    !(ah_attr->ah_flags & IB_AH_GRH))
		goto bail;
	if ((ah_attr->ah_flags & IB_AH_GRH) &&
	    ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
		goto bail;
	if (ah_attr->dlid == 0)
		goto bail;
	if (ah_attr->port_num < 1 ||
	    ah_attr->port_num > ibdev->phys_port_cnt)
		goto bail;
	if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
	    ib_rate_to_mult(ah_attr->static_rate) < 0)
		goto bail;
	if (ah_attr->sl > 15)
		goto bail;
	return 0;
bail:
	return -EINVAL;
}

/**
 * qib_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context.
 */
static struct ib_ah *qib_create_ah(struct ib_pd *pd,
				   struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah;
	struct ib_ah *ret;
	struct qib_ibdev *dev = to_idev(pd->device);
	unsigned long flags;

	if (qib_check_ah(pd->device, ah_attr)) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	if (dev->n_ahs_allocated == ib_qib_max_ahs) {
		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
		kfree(ah);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_ahs_allocated++;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	/* ib_create_ah() will initialize ah->ibah. */
	ah->attr = *ah_attr;
	atomic_set(&ah->refcount, 0);

	ret = &ah->ibah;

bail:
	return ret;
}

/**
 * qib_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int qib_destroy_ah(struct ib_ah *ibah)
{
	struct qib_ibdev *dev = to_idev(ibah->device);
	struct qib_ah *ah = to_iah(ibah);
	unsigned long flags;

	if (atomic_read(&ah->refcount) != 0)
		return -EBUSY;

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	dev->n_ahs_allocated--;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	kfree(ah);

	return 0;
}

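/**
 * qib_modify_ah - replace the attributes of an address handle
 * @ibah: the AH to modify
 * @ah_attr: the new attributes (validated with qib_check_ah())
 */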
static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah = to_iah(ibah);

	if (qib_check_ah(ibah->device, ah_attr))
		return -EINVAL;

	ah->attr = *ah_attr;

	return 0;
}

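/**
 * qib_query_ah - return the current attributes of an address handle
 * @ibah: the AH to query
 * @ah_attr: filled in with the AH's attributes
 */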
static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah = to_iah(ibah);

	*ah_attr = ah->attr;

	return 0;
}

/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
unsigned qib_get_npkeys(struct qib_devdata *dd)
{
	return ARRAY_SIZE(dd->rcd[0]->pkeys);
}

/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is set up if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned ret;

	/* dd->rcd is NULL if mini_init or some init failures */
	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
		ret = 0;
	else
		ret = dd->rcd[ctxt]->pkeys[index];

	return ret;
}

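/**
 * qib_query_pkey - return the indexed PKEY for the given port
 * @ibdev: the infiniband device
 * @port: the 1-based port number
 * @index: index into the PKEY table
 * @pkey: the returned PKEY value
 */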
static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			  u16 *pkey)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	if (index >= qib_get_npkeys(dd)) {
		ret = -EINVAL;
		goto bail;
	}

	*pkey = qib_get_pkey(to_iport(ibdev, port), index);
	ret = 0;

bail:
	return ret;
}

/**
 * qib_alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the QLogic_IB driver
 */
static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
					      struct ib_udata *udata)
{
	struct qib_ucontext *context;
	struct ib_ucontext *ret;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	ret = &context->ibucontext;

bail:
	return ret;
}

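/**
 * qib_dealloc_ucontext - free a ucontext allocated by qib_alloc_ucontext()
 * @context: the context to free
 */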
static int qib_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}

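/**
 * init_ibport - initialize the verbs state of one IB port
 * @ppd: the per-port data
 *
 * Sets the default GID prefix and capability flags, and snapshots the
 * current hardware counters so later reads report values relative to
 * this "zero" point.
 */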
static void init_ibport(struct qib_pportdata *ppd)
{
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = &ppd->ibport_data;

	spin_lock_init(&ibp->lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
	ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
		ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	/* Snapshot current HW counters to "clear" them. */
	qib_get_counters(ppd, &cntrs);
	ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
	ibp->z_link_error_recovery_counter =
		cntrs.link_error_recovery_counter;
	ibp->z_link_downed_counter = cntrs.link_downed_counter;
	ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
	ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
	ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
	ibp->z_port_xmit_data = cntrs.port_xmit_data;
	ibp->z_port_rcv_data = cntrs.port_rcv_data;
	ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
	ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	ibp->z_local_link_integrity_errors =
		cntrs.local_link_integrity_errors;
	ibp->z_excessive_buffer_overrun_errors =
		cntrs.excessive_buffer_overrun_errors;
	ibp->z_vl15_dropped = cntrs.vl15_dropped;
	RCU_INIT_POINTER(ibp->qp0, NULL);
	RCU_INIT_POINTER(ibp->qp1, NULL);
}

/**
 * qib_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 *
 * Return 0 on success, a negative errno on failure.
 */
int qib_register_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->ibdev;
	struct qib_pportdata *ppd = dd->pport;
	unsigned i, lk_tab_size;
	int ret;

	dev->qp_table_size = ib_qib_qp_table_size;
	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
	dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
				GFP_KERNEL);
	if (!dev->qp_table) {
		ret = -ENOMEM;
		goto err_qpt;
	}
	for (i = 0; i < dev->qp_table_size; i++)
		RCU_INIT_POINTER(dev->qp_table[i], NULL);

	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */
	spin_lock_init(&dev->qpt_lock);
	spin_lock_init(&dev->n_pds_lock);
	spin_lock_init(&dev->n_ahs_lock);
	spin_lock_init(&dev->n_cqs_lock);
	spin_lock_init(&dev->n_qps_lock);
	spin_lock_init(&dev->n_srqs_lock);
	spin_lock_init(&dev->n_mcast_grps_lock);
	init_timer(&dev->mem_timer);
	dev->mem_timer.function = mem_timer;
	dev->mem_timer.data = (unsigned long) dev;

	qib_init_qpn_table(dd, &dev->qpn_table);

	/*
	 * The top ib_qib_lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
	spin_lock_init(&dev->lk_table.lock);
	dev->lk_table.max = 1 << ib_qib_lkey_table_size;
	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
	dev->lk_table.table = (struct qib_mregion **)
		__get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
	if (dev->lk_table.table == NULL) {
		ret = -ENOMEM;
		goto err_lk;
	}
	memset(dev->lk_table.table, 0, lk_tab_size);
	INIT_LIST_HEAD(&dev->pending_mmaps);
	spin_lock_init(&dev->pending_lock);
	dev->mmap_offset = PAGE_SIZE;
	spin_lock_init(&dev->mmap_offset_lock);
	INIT_LIST_HEAD(&dev->piowait);
	INIT_LIST_HEAD(&dev->dmawait);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);
	INIT_LIST_HEAD(&dev->txreq_free);

	if (ppd->sdma_descq_cnt) {
		dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
						ppd->sdma_descq_cnt *
						sizeof(struct qib_pio_header),
						&dev->pio_hdrs_phys,
						GFP_KERNEL);
		if (!dev->pio_hdrs) {
			ret = -ENOMEM;
			goto err_hdrs;
		}
	}

	for (i = 0; i < ppd->sdma_descq_cnt; i++) {
		struct qib_verbs_txreq *tx;

		tx = kzalloc(sizeof *tx, GFP_KERNEL);
		if (!tx) {
			ret = -ENOMEM;
			goto err_tx;
		}
		tx->hdr_inx = i;
		list_add(&tx->txreq.list, &dev->txreq_free);
	}

	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_qib_sys_image_guid)
		ib_qib_sys_image_guid = ppd->guid;

	strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = ppd->guid;
	ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
	ibdev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
		(1ull << IB_USER_VERBS_CMD_MODIFY_AH)           |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH)            |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)          |
		(1ull << IB_USER_VERBS_CMD_REG_MR)              |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ)             |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)       |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
		(1ull << IB_USER_VERBS_CMD_POST_SEND)           |
		(1ull << IB_USER_VERBS_CMD_POST_RECV)           |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	ibdev->node_type = RDMA_NODE_IB_CA;
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->num_comp_vectors = 1;
	ibdev->dma_device = &dd->pcidev->dev;
	ibdev->query_device = qib_query_device;
	ibdev->modify_device = qib_modify_device;
	ibdev->query_port = qib_query_port;
	ibdev->modify_port = qib_modify_port;
	ibdev->query_pkey = qib_query_pkey;
	ibdev->query_gid = qib_query_gid;
	ibdev->alloc_ucontext = qib_alloc_ucontext;
	ibdev->dealloc_ucontext = qib_dealloc_ucontext;
	ibdev->alloc_pd = qib_alloc_pd;
	ibdev->dealloc_pd = qib_dealloc_pd;
	ibdev->create_ah = qib_create_ah;
	ibdev->destroy_ah = qib_destroy_ah;
	ibdev->modify_ah = qib_modify_ah;
	ibdev->query_ah = qib_query_ah;
	ibdev->create_srq = qib_create_srq;
	ibdev->modify_srq = qib_modify_srq;
	ibdev->query_srq = qib_query_srq;
	ibdev->destroy_srq = qib_destroy_srq;
	ibdev->create_qp = qib_create_qp;
	ibdev->modify_qp = qib_modify_qp;
	ibdev->query_qp = qib_query_qp;
	ibdev->destroy_qp = qib_destroy_qp;
	ibdev->post_send = qib_post_send;
	ibdev->post_recv = qib_post_receive;
	ibdev->post_srq_recv = qib_post_srq_receive;
	ibdev->create_cq = qib_create_cq;
	ibdev->destroy_cq = qib_destroy_cq;
	ibdev->resize_cq = qib_resize_cq;
	ibdev->poll_cq = qib_poll_cq;
	ibdev->req_notify_cq = qib_req_notify_cq;
	ibdev->get_dma_mr = qib_get_dma_mr;
	ibdev->reg_phys_mr = qib_reg_phys_mr;
	ibdev->reg_user_mr = qib_reg_user_mr;
	ibdev->dereg_mr = qib_dereg_mr;
	ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
	ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
	ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
	ibdev->alloc_fmr = qib_alloc_fmr;
	ibdev->map_phys_fmr = qib_map_phys_fmr;
	ibdev->unmap_fmr = qib_unmap_fmr;
	ibdev->dealloc_fmr = qib_dealloc_fmr;
	ibdev->attach_mcast = qib_multicast_attach;
	ibdev->detach_mcast = qib_multicast_detach;
	ibdev->process_mad = qib_process_mad;
	ibdev->mmap = qib_mmap;
	ibdev->dma_ops = &qib_dma_mapping_ops;

	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
		 QIB_IDSTR " %s", init_utsname()->nodename);

2200 
2201 	ret = ib_register_device(ibdev, qib_create_port_files);
2202 	if (ret)
2203 		goto err_reg;
2204 
2205 	ret = qib_create_agents(dev);
2206 	if (ret)
2207 		goto err_agents;
2208 
2209 	if (qib_verbs_register_sysfs(dd))
2210 		goto err_class;
2211 
2212 	goto bail;
2213 
2214 err_class:
2215 	qib_free_agents(dev);
2216 err_agents:
2217 	ib_unregister_device(ibdev);
2218 err_reg:
2219 err_tx:
2220 	while (!list_empty(&dev->txreq_free)) {
2221 		struct list_head *l = dev->txreq_free.next;
2222 		struct qib_verbs_txreq *tx;
2223 
2224 		list_del(l);
2225 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2226 		kfree(tx);
2227 	}
2228 	if (ppd->sdma_descq_cnt)
2229 		dma_free_coherent(&dd->pcidev->dev,
2230 				  ppd->sdma_descq_cnt *
2231 					sizeof(struct qib_pio_header),
2232 				  dev->pio_hdrs, dev->pio_hdrs_phys);
2233 err_hdrs:
2234 	free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
2235 err_lk:
2236 	kfree(dev->qp_table);
2237 err_qpt:
2238 	qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
2239 bail:
2240 	return ret;
2241 }
2242 
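/**
 * qib_unregister_ib_device - unregister the device and free its resources
 * @dd: the device data structure
 *
 * Undoes qib_register_ib_device(): removes sysfs files and MAD agents,
 * unregisters from the IB core, and frees the QP table, LKEY table,
 * PIO headers and tx requests, warning about any resources still in use.
 */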
void qib_unregister_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->ibdev;
	u32 qps_inuse;
	unsigned lk_tab_size;

	qib_verbs_unregister_sysfs(dd);

	qib_free_agents(dev);

	ib_unregister_device(ibdev);

	if (!list_empty(&dev->piowait))
		qib_dev_err(dd, "piowait list not empty!\n");
	if (!list_empty(&dev->dmawait))
		qib_dev_err(dd, "dmawait list not empty!\n");
	if (!list_empty(&dev->txwait))
		qib_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		qib_dev_err(dd, "memwait list not empty!\n");
	if (dev->dma_mr)
		qib_dev_err(dd, "DMA MR not NULL!\n");

	qps_inuse = qib_free_all_qps(dd);
	if (qps_inuse)
		qib_dev_err(dd, "QP memory leak! %u still in use\n",
			    qps_inuse);

	del_timer_sync(&dev->mem_timer);
	qib_free_qpn_table(&dev->qpn_table);
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (dd->pport->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  dd->pport->sdma_descq_cnt *
					sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
	free_pages((unsigned long) dev->lk_table.table,
		   get_order(lk_tab_size));
	kfree(dev->qp_table);
}