/*
 * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "qib.h"

/**
 * qib_alloc_lkey - allocate an lkey
 * @rkt: lkey table in which to allocate the lkey
 * @mr: memory region that this lkey protects
 *
 * Returns 1 if successful, otherwise returns 0.
 */
int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret;

	spin_lock_irqsave(&rkt->lock, flags);

	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (rkt->table[r] == NULL)
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n) {
			spin_unlock_irqrestore(&rkt->lock, flags);
			ret = 0;
			goto bail;
		}
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
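	/*
	 * Resulting lkey layout (a sketch, not normative): the table
	 * index r occupies the top ib_qib_lkey_table_size bits, a
	 * (24 - ib_qib_lkey_table_size)-bit generation counter sits
	 * above bit 8, and the low byte is zero.  With a table size of
	 * 16 bits, for example, the index lands in bits 31:16 and the
	 * generation in bits 15:8.  Bumping the generation on each
	 * allocation makes a stale lkey left over from a freed slot
	 * unlikely to validate against the slot's new occupant.
	 */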
	rkt->gen++;
	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
	rkt->table[r] = mr;
	spin_unlock_irqrestore(&rkt->lock, flags);

	ret = 1;

bail:
	return ret;
}
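
/*
 * Usage sketch (illustrative, not taken from this file): a memory
 * registration path would typically allocate the lkey right after
 * setting up the region, then publish it through the ib_mr, e.g.
 *
 *	if (!qib_alloc_lkey(&dev->lk_table, &mr->mr))
 *		goto err;
 *	mr->ibmr.lkey = mr->mr.lkey;
 *	mr->ibmr.rkey = mr->mr.lkey;
 *
 * where "mr" is a driver MR wrapper embedding a qib_mregion; the
 * field names above are assumptions for the example.
 */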

/**
 * qib_free_lkey - free an lkey
 * @dev: qib device structure whose lkey table holds the region
 * @mr: memory region whose lkey should be freed
 *
 * Returns 0 on success, or -EBUSY if the region is still referenced.
 */
int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	int ret;

	spin_lock_irqsave(&dev->lk_table.lock, flags);
	if (lkey == 0) {
		if (dev->dma_mr && dev->dma_mr == mr) {
			ret = atomic_read(&dev->dma_mr->refcount);
			if (!ret)
				dev->dma_mr = NULL;
		} else
			ret = 0;
	} else {
		r = lkey >> (32 - ib_qib_lkey_table_size);
		ret = atomic_read(&dev->lk_table.table[r]->refcount);
		if (!ret)
			dev->lk_table.table[r] = NULL;
	}
	spin_unlock_irqrestore(&dev->lk_table.lock, flags);

	if (ret)
		ret = -EBUSY;
	return ret;
}
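
/*
 * Usage sketch (illustrative): a deregistration path checks the
 * result and backs off while the region is still in use, e.g.
 *
 *	ret = qib_free_lkey(dev, &mr->mr);
 *	if (ret)
 *		return ret;
 *
 * A nonzero refcount means outstanding SGEs still point at the
 * region, so the caller gets -EBUSY and must retry later.
 */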

/**
 * qib_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain the SGE is posted under
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags
 *
 * Returns 1 if valid and successful, otherwise returns 0.
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 */
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc)
{
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;
	unsigned long flags;

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	spin_lock_irqsave(&rkt->lock, flags);
	if (sge->lkey == 0) {
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		if (!dev->dma_mr)
			goto bail;
		atomic_inc(&dev->dma_mr->refcount);
		spin_unlock_irqrestore(&rkt->lock, flags);

		isge->mr = dev->dma_mr;
		isge->vaddr = (void *) sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
		     mr->pd != &pd->ibpd))
		goto bail;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;
	atomic_inc(&mr->refcount);
	spin_unlock_irqrestore(&rkt->lock, flags);

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary; entries_spanned_by_off is the number of
		 * times the loop below would have executed.
		 */
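		/*
		 * Worked example (a sketch with assumed numbers): with
		 * page_shift = 12 (4 KiB pages) and off = 0x5432,
		 * entries_spanned_by_off = 0x5432 >> 12 = 5, off becomes
		 * 0x432, and the segment indices are m = 5 / QIB_SEGSZ
		 * and n = 5 % QIB_SEGSZ.
		 */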
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / QIB_SEGSZ;
		n = entries_spanned_by_off % QIB_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	return 1;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}
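
/*
 * Usage sketch (illustrative): a work-request build path converts
 * each user-supplied ib_sge into an internal qib_sge, e.g.
 *
 *	if (!qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
 *			 &wr->sg_list[i], acc))
 *		goto bail_inval;
 *
 * where acc reflects how the local buffer will be accessed (e.g.
 * IB_ACCESS_LOCAL_WRITE when the hardware will write into it); the
 * surrounding names are assumptions for the example.
 */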

/**
 * qib_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: queue pair the request arrived on
 * @sge: SGE to initialize
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Returns 1 if successful, otherwise 0.
 */
int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;
	unsigned long flags;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	spin_lock_irqsave(&rkt->lock, flags);
	if (rkey == 0) {
		struct qib_pd *pd = to_ipd(qp->ibqp.pd);
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		if (!dev->dma_mr)
			goto bail;
		atomic_inc(&dev->dma_mr->refcount);
		spin_unlock_irqrestore(&rkt->lock, flags);

		sge->mr = dev->dma_mr;
		sge->vaddr = (void *) vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;
	atomic_inc(&mr->refcount);
	spin_unlock_irqrestore(&rkt->lock, flags);

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary; entries_spanned_by_off is the number of
		 * times the loop below would have executed.  See the
		 * worked example in qib_lkey_ok() above.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / QIB_SEGSZ;
		n = entries_spanned_by_off % QIB_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}
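
/*
 * Usage sketch (illustrative): a receive path handling an incoming
 * RDMA request validates it against the target region, e.g.
 *
 *	if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
 *				  vaddr, rkey, IB_ACCESS_REMOTE_WRITE)))
 *		goto nack_acc;
 *
 * with vaddr and rkey taken from the RETH of the incoming packet;
 * the surrounding names are assumptions for the example.
 */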

/*
 * Initialize the memory region specified by the work request.
 */
int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
	struct qib_mregion *mr;
	u32 rkey = wr->wr.fast_reg.rkey;
	unsigned i, n, m;
	int ret = -EINVAL;
	unsigned long flags;
	u64 *page_list;
	size_t ps;

	spin_lock_irqsave(&rkt->lock, flags);
	if (pd->user || rkey == 0)
		goto bail;

	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
		goto bail;

	if (wr->wr.fast_reg.page_list_len > mr->max_segs)
		goto bail;

	ps = 1UL << wr->wr.fast_reg.page_shift;
	if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
		goto bail;

	mr->user_base = wr->wr.fast_reg.iova_start;
	mr->iova = wr->wr.fast_reg.iova_start;
	mr->lkey = rkey;
	mr->length = wr->wr.fast_reg.length;
	mr->access_flags = wr->wr.fast_reg.access_flags;
	page_list = wr->wr.fast_reg.page_list->page_list;
	m = 0;
	n = 0;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		mr->map[m]->segs[n].vaddr = (void *) page_list[i];
		mr->map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = 0;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}
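
/*
 * Usage sketch (illustrative): a post-send path applies a fast
 * register work request before queueing it, e.g.
 *
 *	if (wr->opcode == IB_WR_FAST_REG_MR &&
 *	    qib_fast_reg_mr(qp, wr))
 *		goto bail_inval;
 *
 * The surrounding control flow is an assumption for the example.
 */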