/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitmap.h>

#include "mthca_dev.h"

/* Trivial bitmap-based allocator */
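/*
 * Objects live in a bitmap of alloc->max bits.  The search for a free
 * bit resumes at alloc->last, a hint that mthca_free() lowers to the
 * smallest freed bit.  The returned handle also carries high-order
 * bits from alloc->top, which advance by alloc->max (wrapped through
 * alloc->mask) on every search wraparound and every free; reusing a
 * bitmap slot therefore yields a different u32 handle, which helps
 * catch use of stale handles.  mthca_free() masks those bits off
 * again before touching the bitmap.
 */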
u32 mthca_alloc(struct mthca_alloc *alloc)
{
	unsigned long flags;
	u32 obj;

	spin_lock_irqsave(&alloc->lock, flags);

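	/* Resume the search at the hint left behind by the last free. */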
	obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
	if (obj >= alloc->max) {
		alloc->top = (alloc->top + alloc->max) & alloc->mask;
		obj = find_first_zero_bit(alloc->table, alloc->max);
	}

	if (obj < alloc->max) {
		set_bit(obj, alloc->table);
		obj |= alloc->top;
	} else
		obj = -1;

	spin_unlock_irqrestore(&alloc->lock, flags);

	return obj;
}

void mthca_free(struct mthca_alloc *alloc, u32 obj)
{
	unsigned long flags;

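	/* Strip the rotating high bits; only the low bits index the bitmap. */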
	obj &= alloc->max - 1;

	spin_lock_irqsave(&alloc->lock, flags);

	clear_bit(obj, alloc->table);
	alloc->last = min(alloc->last, obj);
	alloc->top = (alloc->top + alloc->max) & alloc->mask;

	spin_unlock_irqrestore(&alloc->lock, flags);
}

int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
		     u32 reserved)
{
	int i;

	/* num must be a power of 2 */
	if (num != 1 << (ffs(num) - 1))
		return -EINVAL;

	alloc->last = 0;
	alloc->top  = 0;
	alloc->max  = num;
	alloc->mask = mask;
	spin_lock_init(&alloc->lock);
	alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof (long),
			       GFP_KERNEL);
	if (!alloc->table)
		return -ENOMEM;

	bitmap_zero(alloc->table, num);
	for (i = 0; i < reserved; ++i)
		set_bit(i, alloc->table);

	return 0;
}

void mthca_alloc_cleanup(struct mthca_alloc *alloc)
{
	kfree(alloc->table);
}
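
/*
 * Sketch of typical usage (the sizes and mask below are illustrative,
 * not taken from this file):
 *
 *	struct mthca_alloc alloc;
 *	u32 obj;
 *
 *	if (mthca_alloc_init(&alloc, 256, (1 << 24) - 1, 16))
 *		return -ENOMEM;		// 256 objects, first 16 reserved
 *
 *	obj = mthca_alloc(&alloc);	// low 8 bits index the bitmap,
 *	if (obj == -1)			// higher bits come from alloc->top
 *		goto out;
 *	mthca_free(&alloc, obj);
 * out:
 *	mthca_alloc_cleanup(&alloc);
 */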

/*
 * Array of pointers with lazy allocation of leaf pages.  Callers of
 * _get, _set and _clear methods must use a lock or otherwise
 * serialize access to the array.
 */

#define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1)
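
/*
 * An index maps to the leaf page (index * sizeof(void *)) >> PAGE_SHIFT
 * and to slot (index & MTHCA_ARRAY_MASK) within that page.  Leaf pages
 * are allocated on first use in _set and freed again in _clear once
 * their use count drops to zero.
 */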

void *mthca_array_get(struct mthca_array *array, int index)
{
	int p = (index * sizeof (void *)) >> PAGE_SHIFT;

	if (array->page_list[p].page)
		return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
	else
		return NULL;
}

int mthca_array_set(struct mthca_array *array, int index, void *value)
{
	int p = (index * sizeof (void *)) >> PAGE_SHIFT;

	/* Allocate with GFP_ATOMIC because we'll be called with locks held. */
	if (!array->page_list[p].page)
		array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);

	if (!array->page_list[p].page)
		return -ENOMEM;

	array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
	++array->page_list[p].used;

	return 0;
}

void mthca_array_clear(struct mthca_array *array, int index)
{
	int p = (index * sizeof (void *)) >> PAGE_SHIFT;

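	/* Drop the leaf page's use count and free the page once it is empty. */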
	if (--array->page_list[p].used == 0) {
		free_page((unsigned long) array->page_list[p].page);
		array->page_list[p].page = NULL;
	} else
		array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL;

	if (array->page_list[p].used < 0)
		pr_debug("Array %p index %d page %d with ref count %d < 0\n",
			 array, index, p, array->page_list[p].used);
}

int mthca_array_init(struct mthca_array *array, int nent)
{
	int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;

	array->page_list = kmalloc(npage * sizeof *array->page_list, GFP_KERNEL);
	if (!array->page_list)
		return -ENOMEM;

	for (i = 0; i < npage; ++i) {
		array->page_list[i].page = NULL;
		array->page_list[i].used = 0;
	}

	return 0;
}

void mthca_array_cleanup(struct mthca_array *array, int nent)
{
	int i;

	for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
		free_page((unsigned long) array->page_list[i].page);

	kfree(array->page_list);
}
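
/*
 * Sketch of typical usage, e.g. mapping QP numbers to driver structs
 * (the names below are illustrative):
 *
 *	mthca_array_init(&qp_array, num_qps);
 *	mthca_array_set(&qp_array, qpn & (num_qps - 1), qp);
 *	qp = mthca_array_get(&qp_array, qpn & (num_qps - 1));
 *	mthca_array_clear(&qp_array, qpn & (num_qps - 1));
 *	mthca_array_cleanup(&qp_array, num_qps);
 */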

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
		    union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
		    int hca_write, struct mthca_mr *mr)
{
	int err = -ENOMEM;
	int npages, shift;
	u64 *dma_list = NULL;
	dma_addr_t t;
	int i;

	if (size <= max_direct) {
		*is_direct = 1;
		npages     = 1;
		shift      = get_order(size) + PAGE_SHIFT;

		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
						     size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		dma_unmap_addr_set(&buf->direct, mapping, t);

		memset(buf->direct.buf, 0, size);

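		/*
		 * If the DMA address isn't aligned to the buffer's natural
		 * order, describe it with smaller aligned pages instead:
		 * halve the page size and double the page count until the
		 * address is aligned to 1 << shift.
		 */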
		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		*is_direct = 0;
		npages     = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		shift      = PAGE_SHIFT;

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			return -ENOMEM;

		buf->page_list = kmalloc(npages * sizeof *buf->page_list,
					 GFP_KERNEL);
		if (!buf->page_list)
			goto err_out;

		for (i = 0; i < npages; ++i)
			buf->page_list[i].buf = NULL;

		for (i = 0; i < npages; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			dma_list[i] = t;
			dma_unmap_addr_set(&buf->page_list[i], mapping, t);

			clear_page(buf->page_list[i].buf);
		}
	}

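	/* Register all pages as a single memory region at HCA address 0. */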
	err = mthca_mr_alloc_phys(dev, pd->pd_num,
				  dma_list, shift, npages,
				  0, size,
				  MTHCA_MPT_FLAG_LOCAL_READ |
				  (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
				  mr);
	if (err)
		goto err_free;

	kfree(dma_list);

	return 0;

err_free:
	mthca_buf_free(dev, size, buf, *is_direct, NULL);

err_out:
	kfree(dma_list);

	return err;
}

void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
		    int is_direct, struct mthca_mr *mr)
{
	int i;

	if (mr)
		mthca_free_mr(dev, mr);

	if (is_direct)
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  dma_unmap_addr(&buf->direct, mapping));
	else {
		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  buf->page_list[i].buf,
					  dma_unmap_addr(&buf->page_list[i],
							 mapping));
		kfree(buf->page_list);
	}
}
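
/*
 * Sketch of typical usage for a queue buffer (the sizes and fields
 * below are illustrative):
 *
 *	err = mthca_buf_alloc(dev, nent * entry_size, max_direct,
 *			      &buf->queue, &buf->is_direct,
 *			      &dev->driver_pd, 1, &buf->mr);
 *	...
 *	mthca_buf_free(dev, nent * entry_size, &buf->queue,
 *		       buf->is_direct, &buf->mr);
 */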