/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <rdma/ib_verbs.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

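/*
 * Number of scatterlist entries that fit in the remainder of one page
 * after the usnic_uiom_chunk header, i.e. the per-chunk page_list size.
 */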
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /	\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

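/*
 * Unpin every page referenced by the chunk list and free the chunks.
 * When @dirty is set, pages are marked dirty before being released.
 */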
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			unpin_user_pages_dirty_lock(&page, 1, dirty);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}

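/*
 * Pin the user pages backing [addr, addr + size) and record them as
 * scatterlist chunks on uiomr->chunk_list.  The pages are charged against
 * the caller's RLIMIT_MEMLOCK, and a reference on the owning mm is taken
 * on success.
 */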
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct usnic_uiom_reg *uiomr)
{
	struct list_head *chunk_list = &uiomr->chunk_list;
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;
	unsigned int gup_flags;
	struct mm_struct *mm;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
		return -EINVAL;

	if (!size)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	uiomr->owning_mm = mm = current->mm;
	mmap_read_lock(mm);

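	/*
	 * Account the pinned pages against RLIMIT_MEMLOCK up front; only
	 * CAP_IPC_LOCK may exceed the limit.
	 */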
	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
	gup_flags = FOLL_WRITE;
	gup_flags |= (writable) ? 0 : FOLL_FORCE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

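	/*
	 * Pin up to one page worth of page pointers per iteration, then
	 * repackage them into kmalloc'ed scatterlist chunks of at most
	 * USNIC_UIOM_PAGE_CHUNK entries each.
	 */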
	while (npages) {
		ret = pin_user_pages(cur_base,
				     min_t(unsigned long, npages,
				     PAGE_SIZE / sizeof(struct page *)),
				     gup_flags | FOLL_LONGTERM,
				     page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(struct_size(chunk, page_list,
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK)),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0) {
		usnic_uiom_put_pages(chunk_list, 0);
		atomic64_sub(npages, &current->mm->pinned_vm);
	} else
		mmgrab(uiomr->owning_mm);

	mmap_read_unlock(mm);
	free_page((unsigned long) page_list);
	return ret;
}

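/*
 * Remove the IOMMU mappings for each interval in the (sorted) list,
 * one page at a time.
 */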
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	long unsigned va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

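/*
 * Tear down a registration: remove its interval from the PD's interval
 * tree, unmap it from the IOMMU, and unpin the backing pages (marking
 * them dirty only if the mapping was writable and @dirty is set).
 */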
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}

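/*
 * Program the IOMMU for each interval in the sorted list.  Physically
 * contiguous pages are coalesced so that each iommu_map() call covers
 * the largest possible run of pages.
 */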
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
				 list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
						 struct usnic_uiom_chunk,
						 list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}

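/*
 * Register a user memory region with the PD: pin the backing pages,
 * compute the set of page intervals not already mapped with the required
 * permissions, map them through the IOMMU, and record the new interval
 * in the PD's interval tree.
 */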
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * Intel IOMMU map throws an error if a translation entry is
	 * changed from read to write.  This module cannot simply unmap
	 * and then remap the entry after fixing the permission, because
	 * that would open a small window where hw DMA may page fault.
	 * Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
				   uiomr);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0,
					IOMMU_WRITE,
					&pd->root,
					&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
	mmdrop(uiomr->owning_mm);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
	mmdrop(uiomr->owning_mm);
	kfree(uiomr);
}

static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
{
	return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}

void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
{
	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
	__usnic_uiom_release_tail(uiomr);
}

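/*
 * Allocate a protection domain backed by a fresh IOMMU domain and install
 * the usNIC fault handler on it.
 */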
struct usnic_uiom_pd *usnic_uiom_alloc_pd(struct device *dev)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(dev->bus);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

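/*
 * Attach a device to the PD's IOMMU domain and add it to the PD's device
 * list.  The attach is rejected if the IOMMU cannot provide cache-coherent
 * DMA, since mappings are created with IOMMU_CACHE.
 */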
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	return iommu_detach_device(pd->domain, dev);
}

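/*
 * Return a NULL-terminated, kcalloc'ed array of the devices currently
 * attached to the PD.  Callers release it with usnic_uiom_free_dev_list().
 */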
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link) {
		devs[i++] = uiom_dev->dev;
	}
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}